static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
		priv->cfg->base_params->num_of_queues =
			iwlagn_mod_params.num_of_queues;

	hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;

	hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
	hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;

	hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
				       BIT(IEEE80211_BAND_5GHZ);

	hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
	hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
	hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
	hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;

	iwl5000_set_ct_threshold(priv);

	/* Set initial sensitivity parameters */
	hw_params(priv).sens = &iwl5000_sensitivity;

	/* Set initial calibration set */
	hw_params(priv).calib_init_cfg =
		BIT(IWL_CALIB_XTAL)		|
		BIT(IWL_CALIB_LO)		|
		BIT(IWL_CALIB_TX_IQ)		|
		BIT(IWL_CALIB_TX_IQ_PERD)	|
		BIT(IWL_CALIB_BASE_BAND);

	return 0;
}
/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <*****@*****.**>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
#include "iwl-agn.h"
#include "iwl-shared.h"
#include "iwl-trans.h"

const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */

static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
				 struct ieee80211_sta_ht_cap *ht_info,
				 enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = hw_params(priv).rx_chains_num;
	u8 tx_chains_num = hw_params(priv).tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	if (priv->cfg->ht_params &&
	    priv->cfg->ht_params->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (hw_params(priv).ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (iwlagn_mod_params.amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
		ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
		ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;

	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
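/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): how the advertised "highest supported Rx data rate"
 * above comes out of the per-stream ceilings. The per-stream values
 * are the MAX_BIT_RATE_* defines used by iwl_init_ht_hw_capab(); with
 * a 40 MHz channel and 2 Rx chains this gives 150 * 2 = 300 Mbps.
 */
static u16 ht_rx_highest_example(bool ht40, u8 rx_chains)
{
	u16 per_stream = ht40 ? MAX_BIT_RATE_40_MHZ : MAX_BIT_RATE_20_MHZ;

	/* capped in practice by the MCS rx_mask set above */
	return per_stream * rx_chains;
}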
static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
			 hw_params(trans).num_ampdu_queues);
}
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);

	return 0;
}
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
			iwl_temp_calib_to_offset(priv->shrd);

	hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
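/*
 * Worked example (hypothetical helper, not driver code): the 5150
 * stores its CT-kill threshold in voltage-derived units rather than
 * Kelvin. The arithmetic mirrors iwl5150_set_ct_threshold() above:
 * convert the legacy Celsius threshold to Kelvin, subtract the EEPROM
 * calibration offset, then scale by the voltage/temperature
 * coefficient.
 */
static s32 ct_threshold_5150_example(s32 threshold_celsius, s32 calib_offset)
{
	s32 threshold_kelvin = threshold_celsius + 273;

	return (threshold_kelvin - calib_offset) *
		IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
}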
static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
				     struct iwl_powertable_cmd *cmd,
				     int dynps_ms, int wakeup_period)
{
	/*
	 * These are the original power level 3 sleep successions. The
	 * device may behave better with such succession and was also
	 * only tested with that. Just like the original sleep commands,
	 * also adjust the succession here to the wakeup_period below.
	 * The ranges are the same as for the sleep commands, 0-2, 3-9
	 * and >10, which is selected based on the DTIM interval for
	 * the sleep index but here we use the wakeup period since that
	 * is what we need to do for the latency requirements.
	 */
	static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
	static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
	static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
	const u8 *slp_succ = slp_succ_r0;
	int i;

	if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
		slp_succ = slp_succ_r1;

	if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
		slp_succ = slp_succ_r2;

	memset(cmd, 0, sizeof(*cmd));

	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
		     IWL_POWER_FAST_PD; /* no use seeing frames for others */

	if (priv->power_data.bus_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;

	if (hw_params(priv).shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;

	if (iwl_advanced_bt_coexist(priv)) {
		if (!priv->cfg->bt_params->bt_sco_disable)
			cmd->flags |= IWL_POWER_BT_SCO_ENA;
		else
			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
	}

	cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
	cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);

	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] =
			cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));

	IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
}
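/*
 * Sketch of the clamping above (hypothetical helper): with
 * wakeup_period = 5, the 3-9 range succession {2, 4, 6, 7, 9} is
 * selected and each entry is capped at the wakeup period, yielding
 * {2, 4, 5, 5, 5}.
 */
static void slp_succ_clamp_example(const u8 *slp_succ, int wakeup_period,
				   u32 *out)
{
	int i;

	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		out[i] = min_t(int, slp_succ[i], wakeup_period);
}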
static struct iwl_link_quality_cmd *
iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		 u8 sta_id)
{
	int i, r;
	struct iwl_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}

	lockdep_assert_held(&priv->shrd->mutex);

	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (priv->band == IEEE80211_BAND_5GHZ)
		r = IWL_RATE_6M_INDEX;
	else if (ctx && ctx->vif && ctx->vif->p2p)
		r = IWL_RATE_6M_INDEX;
	else
		r = IWL_RATE_1M_INDEX;

	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
				RATE_MCS_ANT_POS;
	rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
				first_antenna(hw_params(priv).valid_tx_ant);

	link_cmd->general_params.dual_stream_ant_msk =
		hw_params(priv).valid_tx_ant &
		~first_antenna(hw_params(priv).valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
			hw_params(priv).valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th =
		LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
				q->write_actual);
		}
	}
	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
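/*
 * Sketch (hypothetical helper): the device consumes the Rx write
 * pointer only in steps of 8, so the driver rounds down with
 * "write & ~0x7" before telling the hardware, e.g. 13 -> 8 and
 * 16 -> 16; the remainder is picked up by a later update.
 */
static u32 rx_wptr_align_example(u32 write)
{
	return write & ~0x7;
}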
/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 */
int iwlagn_mac_setup_register(struct iwl_priv *priv,
			      const struct iwl_ucode_capabilities *capa)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-agn-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/*
	 * Including the following line will crash some APs.  This
	 * workaround removes the stimulus which causes the crash until
	 * the AP software can be fixed.
	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	 */

	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		     IEEE80211_HW_SCAN_WHILE_IDLE;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP
	/* enable 11w if the uCode advertises it */
	if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}
static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
	hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);

	hw_params(priv).tx_chains_num =
		num_of_ant(hw_params(priv).valid_tx_ant);
	if (cfg(priv)->rx_with_siso_diversity)
		hw_params(priv).rx_chains_num = 1;
	else
		hw_params(priv).rx_chains_num =
			num_of_ant(hw_params(priv).valid_rx_ant);

	iwl1000_set_ct_threshold(priv);

	/* Set initial sensitivity parameters */
	hw_params(priv).sens = &iwl1000_sensitivity;
}
static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
	hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
				       BIT(IEEE80211_BAND_5GHZ);

	hw_params(priv).tx_chains_num =
		num_of_ant(hw_params(priv).valid_tx_ant);
	hw_params(priv).rx_chains_num =
		num_of_ant(hw_params(priv).valid_rx_ant);

	iwl5150_set_ct_threshold(priv);

	/* Set initial sensitivity parameters */
	hw_params(priv).sens = &iwl5150_sensitivity;
}
/**
 * iwl_init_geos - Initialize mac80211's geo/channel info based on eeprom
 */
int iwl_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kcalloc(priv->channel_count,
			   sizeof(struct ieee80211_channel), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
				     IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
				     IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		sband = &priv->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				is_channel_a_band(ch) ? "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	priv->tx_power_device_lmt = max_tx_power;
	priv->tx_power_user_lmt = max_tx_power;
	priv->tx_power_next = max_tx_power;

	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your %s to maintainer.\n",
			trans(priv)->hw_id_str);
		hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
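/*
 * Sketch (hypothetical helper): what ieee80211_channel_to_frequency()
 * computes for the common 2.4 GHz case used above. Channels 1-13 are
 * spaced 5 MHz apart starting at 2412 MHz; channel 14 (2484 MHz) is a
 * special case not handled by this simplified form.
 */
static int chan_to_freq_24ghz_example(int chan)
{
	return 2407 + chan * 5;	/* e.g. channel 1 -> 2412 MHz */
}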
static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
{
	/* want Celsius */
	hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->shrd->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
		if (priv->tm_fixed_rate) {
			/*
			 * rate overwrite by testmode
			 * we not only send lq command to change rate
			 * we also re-enforce per data pkt base.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate.  Thus, we use the lowest supported rate for
	 * this band.  Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
				info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(hw_params(priv).valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					hw_params(priv).valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
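/*
 * Sketch (hypothetical helper) of the index remap above: mac80211 rate
 * indices are per-band, and the 5 GHz band carries no CCK rates, so
 * the driver shifts the index by IWL_FIRST_OFDM_RATE to address its
 * combined CCK+OFDM rate table.
 */
static int legacy_rate_to_driver_idx_example(int mac80211_idx, bool is_5ghz)
{
	return is_5ghz ? mac80211_idx + IWL_FIRST_OFDM_RATE : mac80211_idx;
}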
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	unsigned long flags;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= hw_params(priv).max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->shrd->tid_data[sta_id][tid].agg;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed,
		 * log the information and return for now!
		 * since it can happen very often, and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow,
			   ba_resp_scd_ssn);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
			ba_resp->txed, ba_resp->txed_2_done);

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
			  ba_resp_scd_ssn, 0, &reclaimed_skbs);

	freed = 0;
	while (!skb_queue_empty(&reclaimed_skbs)) {

		skb = __skb_dequeue(&reclaimed_skbs);
		hdr = (struct ieee80211_hdr *)skb->data;

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}

		ieee80211_tx_status_irqsafe(priv->hw, skb);
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}
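/*
 * Sketch (hypothetical helper): the reclaim above releases every TFD
 * in front of the block-ack window. The Tx queue is a 256-entry ring,
 * so the number of entries freed when the read pointer advances to the
 * window start is computed modulo the ring size.
 */
static int ba_reclaimed_count_example(u16 read_ptr, u16 scd_ssn)
{
	return (scd_ssn - read_ptr) & 0xff;
}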
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
				 struct iwl_powertable_cmd *cmd,
				 enum iwl_power_level lvl, int period)
{
	const struct iwl_power_vec_entry *table;
	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
	int i;
	u8 skip;
	u32 slp_itrvl;

	if (priv->cfg->adv_pm) {
		table = apm_range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = apm_range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = apm_range_0;
	} else {
		table = range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = range_0;
	}

	if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
		memset(cmd, 0, sizeof(*cmd));
	else
		*cmd = table[lvl].cmd;

	if (period == 0) {
		skip = 0;
		period = 1;

		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = 1;

	} else {
		skip = table[lvl].no_dtim;

		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);

		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
	}

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	/* figure out the listen interval based on dtim period and skip */
	if (slp_itrvl == 0xFF)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(period * (skip + 1));

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > period)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32((slp_itrvl / period) * period);

	if (skip)
		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
	else
		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;

	if (hw_params(priv).shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;

	if (iwl_advanced_bt_coexist(priv)) {
		if (!priv->cfg->bt_params->bt_sco_disable)
			cmd->flags |= IWL_POWER_BT_SCO_ENA;
		else
			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
	}

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);

	/* enforce max sleep interval */
	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0; i--) {
		if (le32_to_cpu(cmd->sleep_interval[i]) >
		    (max_sleep[i] * period))
			cmd->sleep_interval[i] =
				cpu_to_le32(max_sleep[i] * period);
		if (i != (IWL_POWER_VEC_SIZE - 1)) {
			if (le32_to_cpu(cmd->sleep_interval[i]) >
			    le32_to_cpu(cmd->sleep_interval[i+1]))
				cmd->sleep_interval[i] =
					cmd->sleep_interval[i+1];
		}
	}

	if (priv->power_data.bus_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;
	else
		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
			skip, period);
	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}
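/*
 * Worked example (hypothetical helper) for the listen-interval rounding
 * above: with a final sleep interval of 10 and a DTIM period of 3, the
 * interval is rounded down to a whole number of DTIM periods,
 * (10 / 3) * 3 = 9 beacon intervals.
 */
static u32 listen_interval_round_example(u32 slp_itrvl, u32 period)
{
	if (slp_itrvl > period)
		slp_itrvl = (slp_itrvl / period) * period;
	return slp_itrvl;
}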
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(bus(trans), CSR_INT,
		trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
				inta, inta_mask);
	}
#endif

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW,
					&trans->shrd->status);
			else
				clear_bit(STATUS_RF_KILL_HW,
					  &trans->shrd->status);
			iwl_set_hw_rfkill_state(priv(trans), hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < hw_params(trans).max_txq_num; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(bus(trans), CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(bus(trans),
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive
		 * the RX interrupt while the shared data changes do not
		 * reflect it yet; the periodic interrupt will detect any
		 * dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);
		iwl_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
				    CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans->ucode_write_complete = 1;
		wake_up(&trans->shrd->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv(trans));
}
/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (hw_params(trans).rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				   hw_params(trans).rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   hw_params(trans).rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, hw_params(trans).rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
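/*
 * Sketch (hypothetical helper) of the DMA constraints asserted above:
 * the hardware addresses receive buffers with 36 bits and requires
 * 256-byte alignment, so a mapped address is acceptable only if no bit
 * above bit 35 and none of the low 8 bits are set.
 */
static bool rx_dma_addr_ok_example(dma_addr_t addr)
{
	return !(addr & ~DMA_BIT_MASK(36)) && !(addr & DMA_BIT_MASK(8));
}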
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit, u16 ssn)
{
	int tx_fifo, txq_id;
	u16 ra_tid;
	unsigned long flags;

	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
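/*
 * Sketch (field layout assumed, helper hypothetical): the scheduler
 * keys aggregation queues by a packed receiver-address/TID value built
 * with BUILD_RAxTID(). A stand-in under the assumption that the
 * station id occupies the bits above a 4-bit TID field:
 */
static u16 build_ra_tid_example(int sta_id, int tid)
{
	return (sta_id << 4) | (tid & 0xf);
}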
static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *cmd;
	unsigned long flags;
	int len, err;
	u16 sequence;
	struct iwl_rx_cmd_buffer rxcb;
	struct iwl_rx_packet *pkt;
	bool reclaim;
	int index, cmd_index;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma,
		       PAGE_SIZE << hw_params(trans).rx_page_order,
		       DMA_FROM_DEVICE);

	rxcb._page = rxb->page;
	pkt = rxb_addr(&rxcb);

	IWL_DEBUG_RX(trans, "%s, 0x%02x\n",
		     get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);

	len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	len += sizeof(u32); /* account for status word */
	trace_iwlwifi_dev_rx(trans->dev, pkt, len);

	/* Reclaim a command buffer only if this packet is a response
	 *   to a (driver-originated) command.
	 * If the packet (e.g. Rx frame) originated from uCode,
	 *   there is no command buffer to reclaim.
	 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
	 *   but apparently a few don't get set; catch them here. */
	reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
	if (reclaim) {
		int i;

		for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
			if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) {
				reclaim = false;
				break;
			}
		}
	}

	sequence = le16_to_cpu(pkt->hdr.sequence);
	index = SEQ_TO_INDEX(sequence);
	cmd_index = get_cmd_index(&txq->q, index);

	if (reclaim)
		cmd = txq->cmd[cmd_index];
	else
		cmd = NULL;

	err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

	/*
	 * XXX: After here, we should always check rxcb._page
	 * against NULL before touching it or its virtual
	 * memory (pkt). Because some rx_handler might have
	 * already taken or freed the pages.
	 */

	if (reclaim) {
		/* Invoke any callbacks, transfer the buffer to caller,
		 * and fire off the (possibly) blocking
		 * iwl_trans_send_cmd()
		 * as we reclaim the driver command queue */
		if (rxcb._page)
			iwl_tx_cmd_complete(trans, &rxcb, err);
		else
			IWL_WARN(trans, "Claim null rxb?\n");
	}

	/* page was stolen from us */
	if (rxcb._page == NULL)
		rxb->page = NULL;

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE <<
					hw_params(trans).rx_page_order,
				     DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
		priv->cfg->base_params->num_of_queues =
			iwlagn_mod_params.num_of_queues;

	hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
	hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;

	hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
	hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;

	hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);

	hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
	if (priv->cfg->rx_with_siso_diversity)
		hw_params(priv).rx_chains_num = 1;
	else
		hw_params(priv).rx_chains_num =
			num_of_ant(priv->cfg->valid_rx_ant);
	hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
	hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;

	iwl2000_set_ct_threshold(priv);

	/* Set initial sensitivity parameters */
	hw_params(priv).sens = &iwl2000_sensitivity;

	/* Set initial calibration set */
	hw_params(priv).calib_init_cfg =
		BIT(IWL_CALIB_XTAL)	|
		BIT(IWL_CALIB_LO)	|
		BIT(IWL_CALIB_TX_IQ)	|
		BIT(IWL_CALIB_BASE_BAND);
	if (priv->cfg->need_dc_calib)
		hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
	if (priv->cfg->need_temp_offset_calib)
		hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);

	return 0;
}
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_device_cmd *cmd;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;
	int index, cmd_index;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);

	/* calculate total frames that need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len, err;
		u16 sequence;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		if (WARN_ON(rxb == NULL)) {
			i = (i + 1) & RX_QUEUE_MASK;
			continue;
		}

		rxq->queue[i] = NULL;

		dma_unmap_page(bus(trans)->dev, rxb->page_dma,
			       PAGE_SIZE << hw_params(trans).rx_page_order,
			       DMA_FROM_DEVICE);
		pkt = rxb_addr(rxb);

		IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
			i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv(trans), pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->cmd[cmd_index];
		else
			cmd = NULL;

		/* warn if this is cmd response / notification and the uCode
		 * didn't set the SEQ_RX_FRAME for a frame that is
		 * uCode-originated
		 * If you saw this code after the second half of 2012, then
		 * please remove it
		 */
		WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
		     (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
		     "reclaim is false, SEQ_RX_FRAME unset: %s\n",
		     get_cmd_string(pkt->hdr.cmd));

		err = iwl_rx_dispatch(priv(trans), rxb, cmd);

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_tx_cmd_complete(trans, rxb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
				0, PAGE_SIZE <<
				    hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(trans);
	else
		iwlagn_rx_queue_restock(trans);
}
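/*
 * Sketch (hypothetical helper) of the restock accounting in
 * iwl_rx_handle() above: the number of empty slots is the distance
 * from the driver's last write position to the device's closed-buffer
 * index, wrapped around the RX_QUEUE_SIZE ring.
 */
static int rx_ring_empty_example(u16 closed_rb_num, u32 write_actual)
{
	int total_empty = (closed_rb_num & 0x0FFF) - write_actual;

	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;
	return total_empty;
}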
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans_pcie->cmd_queue, sequence,
		  trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}
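/*
 * Sketch (bit layout assumed, helper hypothetical): the uCode echoes
 * back the sequence field the driver placed in the command header, and
 * iwl_tx_cmd_complete() above recovers the queue and ring index from
 * it with SEQ_TO_QUEUE()/SEQ_TO_INDEX(). A round trip for queue 4,
 * index 42 would look like:
 */
static bool seq_roundtrip_example(void)
{
	u16 sequence = (4 << 8) | 42;	/* queue in high bits, index low */

	return SEQ_TO_QUEUE(sequence) == 4 && SEQ_TO_INDEX(sequence) == 42;
}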
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
	/* want Celsius */
	hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
	hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}