int
il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	int count;
	struct il_queue *q;
	struct il3945_tfd *tfd, *tfd_tmp;

	q = &txq->q;
	tfd_tmp = (struct il3945_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	/* How many buffer chunks are already attached to this TFD? */
	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));

	if (count >= NUM_TFD_CHUNKS || count < 0) {
		IL_ERR("Error: cannot attach more than %d chunks to a TFD\n",
		       NUM_TFD_CHUNKS);
		return -EINVAL;
	}

	/* Append the new DMA chunk, then update the chunk count and pad bits */
	tfd->tbs[count].addr = cpu_to_le32(addr);
	tfd->tbs[count].len = cpu_to_le32(len);

	count++;

	tfd->control_flags =
	    cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));

	return 0;
}
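/*
 * Illustrative, standalone sketch of the chunk-count bookkeeping that
 * il3945_hw_txq_attach_buf_to_tfd() performs through the TFD_CTL_* macros.
 * The DEMO_* bit positions and DEMO_NUM_CHUNKS below are stand-ins chosen
 * for the example only; the authoritative layout lives in the driver's
 * flow-handler definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CTL_COUNT_SET(n)	((uint32_t)(n) << 24)	/* stand-in */
#define DEMO_CTL_COUNT_GET(c)	(((c) >> 24) & 0x7)	/* stand-in */
#define DEMO_CTL_PAD_SET(p)	((uint32_t)(p) << 28)	/* stand-in */
#define DEMO_NUM_CHUNKS		6	/* stand-in for NUM_TFD_CHUNKS */

int main(void)
{
	uint32_t control_flags = 0;
	int count;

	/* Attach two chunks, e.g. a command/header chunk then a payload chunk */
	for (int chunk = 0; chunk < 2; chunk++) {
		count = DEMO_CTL_COUNT_GET(control_flags);
		if (count >= DEMO_NUM_CHUNKS)
			return 1;	/* TFD full */
		/* ...record addr/len for tbs[count] here... */
		control_flags =
		    DEMO_CTL_COUNT_SET(count + 1) | DEMO_CTL_PAD_SET(0);
	}

	printf("chunks attached: %u\n", DEMO_CTL_COUNT_GET(control_flags));
	return 0;
}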
static ssize_t il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; char buf[8]; int buf_size; int ht40; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; if (sscanf(buf, "%d", &ht40) != 1) return -EFAULT; if (!il_is_any_associated(il)) il->disable_ht40 = ht40 ? true : false; else { IL_ERR("Sta associated with AP - " "Change to 40MHz channel support is not allowed\n"); return -EINVAL; } return count; }
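/*
 * Userspace sketch of the bounded-parse pattern used by
 * il_dbgfs_disable_ht40_write(): copy at most sizeof(buf) - 1 bytes,
 * guarantee NUL termination via the earlier memset, then sscanf() an
 * integer out of the buffer.  parse_flag() and the inputs are illustrative,
 * not part of the driver.
 */
#include <stdio.h>
#include <string.h>

static int parse_flag(const char *user_input, size_t count, int *flag)
{
	char buf[8];
	size_t buf_size;

	memset(buf, 0, sizeof(buf));		/* ensures NUL termination */
	buf_size = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
	memcpy(buf, user_input, buf_size);	/* copy_from_user() in the driver */

	return sscanf(buf, "%d", flag) == 1 ? 0 : -1;
}

int main(void)
{
	int ht40;

	if (parse_flag("1\n", 2, &ht40) == 0)
		printf("disable_ht40 = %d\n", ht40 ? 1 : 0);
	return 0;
}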
/**
 * il3945_txq_ctx_reset - Reset TX queue context
 *
 * Destroys all DMA structures and initializes them again
 */
static int
il3945_txq_ctx_reset(struct il_priv *il)
{
	int rc, txq_id;

	il3945_hw_txq_ctx_free(il);

	/* allocate tx queue structure */
	rc = il_alloc_txq_mem(il);
	if (rc)
		return rc;

	/* Tx CMD queue */
	rc = il3945_tx_reset(il);
	if (rc)
		goto error;

	/* Tx queue(s) */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		rc = il_tx_queue_init(il, txq_id);
		if (rc) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return rc;

error:
	il3945_hw_txq_ctx_free(il);
	return rc;
}
/**
 * il3945_hw_reg_txpower_get_temperature
 * Get the current temperature by reading from the NIC
 */
static int
il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
{
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	int temperature;

	temperature = il3945_hw_get_temperature(il);

	/* driver's okay range is -260 to +25.
	 *   human readable okay range is 0 to +285 */
	D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);

	/* handle insane temp reading */
	if (il3945_hw_reg_temp_out_of_range(temperature)) {
		IL_ERR("Error bad temperature value %d\n", temperature);

		/* if the device has recently been running very hot,
		 * substitute the 3rd band/group's temp measured at factory */
		if (il->last_temperature > 100)
			temperature = eeprom->groups[2].temperature;
		else	/* else use most recent "sane" value from driver */
			temperature = il->last_temperature;
	}

	return temperature;	/* raw, not "human readable" */
}
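/*
 * The raw-vs-"human readable" split above comes down to a fixed offset,
 * IL_TEMP_CONVERT.  Judging by the quoted ranges (-260..+25 raw versus
 * 0..+285 displayed) that offset appears to be 260; the value below is an
 * assumption used for illustration only.
 */
#include <stdio.h>

#define DEMO_TEMP_CONVERT 260	/* assumed value of IL_TEMP_CONVERT */

int main(void)
{
	int raw = -200;		/* a plausible in-range raw reading */

	printf("raw %d -> displayed %d\n", raw, raw + DEMO_TEMP_CONVERT);
	return 0;
}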
static ssize_t il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { ssize_t ret; struct il_priv *il = file->private_data; int pos = 0, ofs = 0, buf_size = 0; const u8 *ptr; char *buf; u16 eeprom_ver; size_t eeprom_len = il->cfg->eeprom_size; buf_size = 4 * eeprom_len + 256; if (eeprom_len % 16) { IL_ERR("NVM size is not multiple of 16.\n"); return -ENODATA; } ptr = il->eeprom; if (!ptr) { IL_ERR("Invalid EEPROM memory\n"); return -ENOMEM; } /* 4 characters for byte 0xYY */ buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION); pos += scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n", eeprom_ver); for (ofs = 0; ofs < eeprom_len; ofs += 16) { pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos, buf_size - pos, 0); pos += strlen(buf + pos); if (buf_size - pos > 0) buf[pos++] = '\n'; } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }
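/*
 * Standalone sketch of how one EEPROM dump row in il_dbgfs_nvm_read() is
 * laid out: a "0x%.4x " offset prefix, then 16 bytes rendered by
 * hex_dump_to_buffer() with a groupsize of 2, then a '\n'.  At roughly
 * 3 output characters per byte, the 4 * eeprom_len + 256 allocation is a
 * comfortable upper bound.  The pairing below is illustrative; the
 * in-kernel helper handles grouping and endianness itself.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t row[16] = { 0xde, 0xad, 0xbe, 0xef, 0, 1, 2, 3,
			    4, 5, 6, 7, 8, 9, 10, 11 };
	char line[80];
	int pos = 0;

	pos += snprintf(line + pos, sizeof(line) - pos, "0x%.4x ", 0x0010);
	for (int i = 0; i < 16; i += 2)	/* groupsize 2: emit byte pairs */
		pos += snprintf(line + pos, sizeof(line) - pos, "%02x%02x ",
				row[i], row[i + 1]);
	printf("%s\n", line);
	return 0;
}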
static void il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, struct ieee80211_rx_status *stats) { struct il_rx_pkt *pkt = rxb_addr(rxb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt); struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); u32 len = le16_to_cpu(rx_hdr->len); struct sk_buff *skb; __le16 fc = hdr->frame_control; u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order; /* We received data from the HW, so stop the watchdog */ if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) { D_DROP("Corruption detected!\n"); return; } /* We only process data packets if the interface is open */ if (unlikely(!il->is_open)) { D_DROP("Dropping packet while interface is not open.\n"); return; } if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) { il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE); D_INFO("Woke queues - frame received on passive channel\n"); } skb = dev_alloc_skb(SMALL_PACKET_SIZE); if (!skb) { IL_ERR("dev_alloc_skb failed\n"); return; } if (!il3945_mod_params.sw_crypto) il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt, le32_to_cpu(rx_end->status), stats); /* If frame is small enough to fit into skb->head, copy it * and do not consume a full page */ if (len <= SMALL_PACKET_SIZE) { memcpy(skb_put(skb, len), rx_hdr->payload, len); } else { skb_add_rx_frag(skb, 0, rxb->page, (void *)rx_hdr->payload - (void *)pkt, len, fraglen); il->alloc_rxb_page--; rxb->page = NULL; } il_update_stats(il, false, fc, len); memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); ieee80211_rx(il->hw, skb); }
static ssize_t il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; struct il_tx_queue *txq; struct il_queue *q; char *buf; int pos = 0; int cnt; int ret; const size_t bufsz = sizeof(char) * 64 * il->cfg->num_of_queues; if (!il->txq) { IL_ERR("txq not ready\n"); return -EAGAIN; } buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { txq = &il->txq[cnt]; q = &txq->q; pos += scnprintf(buf + pos, bufsz - pos, "hwq %.2d: read=%u write=%u stop=%d" " swq_id=%#.2x (ac %d/hwq %d)\n", cnt, q->read_ptr, q->write_ptr, !!test_bit(cnt, il->queue_stopped), txq->swq_id, txq->swq_id & 3, (txq->swq_id >> 2) & 0x1f); if (cnt >= 4) continue; /* for the ACs, display the stop count too */ pos += scnprintf(buf + pos, bufsz - pos, " stop-count: %d\n", atomic_read(&il->queue_stop_count[cnt])); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }
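/*
 * The "swq_id=%#.2x (ac %d/hwq %d)" line above unpacks the software queue
 * id as: low two bits = access category, next five bits = hardware queue.
 * A standalone round-trip of that encoding (the helper name is illustrative):
 */
#include <stdio.h>

static unsigned int demo_swq_id(unsigned int hwq, unsigned int ac)
{
	return (hwq << 2) | (ac & 3);
}

int main(void)
{
	unsigned int swq_id = demo_swq_id(5, 2);

	printf("swq_id=%#.2x ac=%u hwq=%u\n",
	       swq_id, swq_id & 3, (swq_id >> 2) & 0x1f);
	return 0;
}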
int il3945_hw_nic_init(struct il_priv *il) { int rc; unsigned long flags; struct il_rx_queue *rxq = &il->rxq; spin_lock_irqsave(&il->lock, flags); il3945_apm_init(il); spin_unlock_irqrestore(&il->lock, flags); il3945_set_pwr_vmain(il); il3945_nic_config(il); /* Allocate the RX queue, or reset if it is already allocated */ if (!rxq->bd) { rc = il_rx_queue_alloc(il); if (rc) { IL_ERR("Unable to initialize Rx queue\n"); return -ENOMEM; } } else il3945_rx_queue_reset(il, rxq); il3945_rx_replenish(il); il3945_rx_init(il, rxq); /* Look at using this instead: rxq->need_update = 1; il_rx_queue_update_write_ptr(il, rxq); */ il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7); rc = il3945_txq_ctx_reset(il); if (rc) return rc; set_bit(S_INIT, &il->status); return 0; }
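/*
 * The FH39_RCSR_WPTR write above rounds the RX queue write pointer down to
 * a multiple of 8 with "& ~7" before handing it to the flow handler.
 * A standalone illustration of that masking:
 */
#include <stdio.h>

int main(void)
{
	unsigned int write = 61;

	/* 61 & ~7 == 56: only 8-aligned values reach the hardware register */
	printf("write=%u -> wptr=%u\n", write, write & ~7u);
	return 0;
}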
/**
 * il3945_hw_txq_free_tfd - Free one TFD, the one at idx [txq->q.read_ptr]
 *
 * Does NOT advance any indexes
 */
void
il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
	int idx = txq->q.read_ptr;
	struct il3945_tfd *tfd = &tfd_tmp[idx];
	struct pci_dev *dev = il->pci_dev;
	int i;
	int counter;

	/* sanity check */
	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
	if (counter > NUM_TFD_CHUNKS) {
		IL_ERR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, this is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (counter)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_TODEVICE);

	/* unmap chunks if any */
	for (i = 1; i < counter; i++)
		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
				 le32_to_cpu(tfd->tbs[i].len),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
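/*
 * il3945_hw_txq_free_tfd() deliberately leaves txq->q.read_ptr alone; the
 * reclaim path advances it with wraparound as it walks the ring.  A
 * standalone sketch of that index arithmetic (the driver has its own
 * inc-wrap helper; this is only an illustration):
 */
#include <stdio.h>

static int demo_inc_wrap(int idx, int n_bd)
{
	return ++idx < n_bd ? idx : 0;
}

int main(void)
{
	int n_bd = 256, read_ptr = 254;

	for (int i = 0; i < 4; i++) {
		/* free the TFD at read_ptr here, then advance */
		read_ptr = demo_inc_wrap(read_ptr, n_bd);
		printf("read_ptr=%d\n", read_ptr);
	}
	return 0;
}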
static ssize_t il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; struct ieee80211_channel *channels = NULL; const struct ieee80211_supported_band *supp_band = NULL; int pos = 0, i, bufsz = PAGE_SIZE; char *buf; ssize_t ret; if (!test_bit(S_GEO_CONFIGURED, &il->status)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ); if (supp_band) { channels = supp_band->channels; pos += scnprintf(buf + pos, bufsz - pos, "Displaying %d channels in 2.4GHz band 802.11bg):\n", supp_band->n_channels); for (i = 0; i < supp_band->n_channels; i++) pos += scnprintf(buf + pos, bufsz - pos, "%d: %ddBm: BSS%s%s, %s.\n", channels[i].hw_value, channels[i].max_power, channels[i]. flags & IEEE80211_CHAN_RADAR ? " (IEEE 802.11h required)" : "", ((channels[i]. flags & IEEE80211_CHAN_NO_IBSS) || (channels[i]. flags & IEEE80211_CHAN_RADAR)) ? "" : ", IBSS", channels[i]. flags & IEEE80211_CHAN_PASSIVE_SCAN ? "passive only" : "active/passive"); } supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ); if (supp_band) { channels = supp_band->channels; pos += scnprintf(buf + pos, bufsz - pos, "Displaying %d channels in 5.2GHz band (802.11a)\n", supp_band->n_channels); for (i = 0; i < supp_band->n_channels; i++) pos += scnprintf(buf + pos, bufsz - pos, "%d: %ddBm: BSS%s%s, %s.\n", channels[i].hw_value, channels[i].max_power, channels[i]. flags & IEEE80211_CHAN_RADAR ? " (IEEE 802.11h required)" : "", ((channels[i]. flags & IEEE80211_CHAN_NO_IBSS) || (channels[i]. flags & IEEE80211_CHAN_RADAR)) ? "" : ", IBSS", channels[i]. flags & IEEE80211_CHAN_PASSIVE_SCAN ? "passive only" : "active/passive"); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }
/* * Create the debugfs files and directories * */ int il_dbgfs_register(struct il_priv *il, const char *name) { struct dentry *phyd = il->hw->wiphy->debugfsdir; struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; dir_drv = debugfs_create_dir(name, phyd); if (!dir_drv) return -ENOMEM; il->debugfs_dir = dir_drv; dir_data = debugfs_create_dir("data", dir_drv); if (!dir_data) goto err; dir_rf = debugfs_create_dir("rf", dir_drv); if (!dir_rf) goto err; dir_debug = debugfs_create_dir("debug", dir_drv); if (!dir_debug) goto err; DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); if (il->cfg->sensitivity_calib_by_driver) DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); if (il->cfg->chain_noise_calib_by_driver) DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); if (il->cfg->sensitivity_calib_by_driver) DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &il->disable_sens_cal); if (il->cfg->chain_noise_calib_by_driver) DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, &il->disable_chain_noise_cal); DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal); return 0; err: IL_ERR("Can't create the debugfs directory\n"); il_dbgfs_unregister(il); return -ENOMEM; }
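/*
 * DEBUGFS_ADD_FILE and DEBUGFS_ADD_BOOL are defined elsewhere in the
 * driver's debug code.  As a rough sketch of the shape such a macro has
 * (not the driver's exact definition), it wraps debugfs_create_file()
 * with a name-derived file_operations symbol and bails out on failure,
 * which is what allows the long list above to stay a flat sequence:
 */
#define DEMO_DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, il,		\
				 &il_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)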
static ssize_t il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; int cnt = 0; char *buf; int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100; ssize_t ret; struct il_chain_noise_data *data; data = &il->chain_noise_data; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", data->active_chains); pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", data->chain_noise_a); pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", data->chain_noise_b); pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", data->chain_noise_c); pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", data->chain_signal_a); pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", data->chain_signal_b); pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", data->chain_signal_c); pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", data->beacon_count); pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { pos += scnprintf(buf + pos, bufsz - pos, " %u", data->disconn_array[cnt]); } pos += scnprintf(buf + pos, bufsz - pos, "\n"); pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { pos += scnprintf(buf + pos, bufsz - pos, " %u", data->delta_gain_code[cnt]); } pos += scnprintf(buf + pos, bufsz - pos, "\n"); pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", data->radio_write); pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", data->state); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }
/** * il3945_hdl_tx - Handle Tx response */ static void il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) { struct il_rx_pkt *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); int idx = SEQ_TO_IDX(sequence); struct il_tx_queue *txq = &il->txq[txq_id]; struct ieee80211_tx_info *info; struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; u32 status = le32_to_cpu(tx_resp->status); int rate_idx; int fail; if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { IL_ERR("Read idx for DMA queue txq_id (%d) idx %d " "is out of range [0-%d] %d %d\n", txq_id, idx, txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); return; } /* * Firmware will not transmit frame on passive channel, if it not yet * received some valid frame on that channel. When this error happen * we have to wait until firmware will unblock itself i.e. when we * note received beacon or other frame. We unblock queues in * il3945_pass_packet_to_mac80211 or in il_mac_bss_info_changed. */ if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) && il->iw_mode == NL80211_IFTYPE_STATION) { il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE); D_INFO("Stopped queues - RX waiting on passive channel\n"); } txq->time_stamp = jiffies; info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]); ieee80211_tx_info_clear_status(info); /* Fill the MRR chain with some info about on-chip retransmissions */ rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate); if (info->band == IEEE80211_BAND_5GHZ) rate_idx -= IL_FIRST_OFDM_RATE; fail = tx_resp->failure_frame; info->status.rates[0].idx = rate_idx; info->status.rates[0].count = fail + 1; /* add final attempt */ /* tx_status->rts_retry_count = tx_resp->failure_rts; */ info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0; D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id, il3945_get_tx_fail_reason(status), status, tx_resp->rate, tx_resp->failure_frame); D_TX_REPLY("Tx queue reclaim %d\n", idx); il3945_tx_queue_reclaim(il, txq_id, idx); if (status & TX_ABORT_REQUIRED_MSK) IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n"); }
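/*
 * il3945_hdl_tx() recovers the TX queue and ring index from the response's
 * 16-bit sequence field via SEQ_TO_QUEUE()/SEQ_TO_IDX().  The stand-in
 * macros below assume the usual split -- ring index in the low byte,
 * queue number above it -- purely for illustration; the real field widths
 * are defined in the driver's command headers.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SEQ_TO_IDX(s)	((s) & 0xff)		/* stand-in */
#define DEMO_SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)	/* stand-in */

int main(void)
{
	uint16_t sequence = (4 << 8) | 0x2a;	/* queue 4, index 42 */

	printf("txq_id=%d idx=%d\n",
	       DEMO_SEQ_TO_QUEUE(sequence), DEMO_SEQ_TO_IDX(sequence));
	return 0;
}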
static int il3945_send_rxon_assoc(struct il_priv *il) { int rc = 0; struct il_rx_pkt *pkt; struct il3945_rxon_assoc_cmd rxon_assoc; struct il_host_cmd cmd = { .id = C_RXON_ASSOC, .len = sizeof(rxon_assoc), .flags = CMD_WANT_SKB, .data = &rxon_assoc, }; const struct il_rxon_cmd *rxon1 = &il->staging; const struct il_rxon_cmd *rxon2 = &il->active; if (rxon1->flags == rxon2->flags && rxon1->filter_flags == rxon2->filter_flags && rxon1->cck_basic_rates == rxon2->cck_basic_rates && rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) { D_INFO("Using current RXON_ASSOC. Not resending.\n"); return 0; } rxon_assoc.flags = il->staging.flags; rxon_assoc.filter_flags = il->staging.filter_flags; rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates; rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates; rxon_assoc.reserved = 0; rc = il_send_cmd_sync(il, &cmd); if (rc) return rc; pkt = (struct il_rx_pkt *)cmd.reply_page; if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { IL_ERR("Bad return from C_RXON_ASSOC command\n"); rc = -EIO; } il_free_pages(il, cmd.reply_page); return rc; } /** * il3945_commit_rxon - commit staging_rxon to hardware * * The RXON command in staging_rxon is committed to the hardware and * the active_rxon structure is updated with the new data. This * function correctly transitions out of the RXON_ASSOC_MSK state if * a HW tune is required based on the RXON structure changes. */ int il3945_commit_rxon(struct il_priv *il) { /* cast away the const for active_rxon in this function */ struct il3945_rxon_cmd *active_rxon = (void *)&il->active; struct il3945_rxon_cmd *staging_rxon = (void *)&il->staging; int rc = 0; bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK); if (test_bit(S_EXIT_PENDING, &il->status)) return -EINVAL; if (!il_is_alive(il)) return -1; /* always get timestamp with Rx frame */ staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK; /* select antenna */ staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); staging_rxon->flags |= il3945_get_antenna_flags(il); rc = il_check_rxon_cmd(il); if (rc) { IL_ERR("Invalid RXON configuration. Not committing.\n"); return -EINVAL; } /* If we don't need to send a full RXON, we can use * il3945_rxon_assoc_cmd which is used to reconfigure filter * and other flags for the current radio configuration. */ if (!il_full_rxon_required(il)) { rc = il_send_rxon_assoc(il); if (rc) { IL_ERR("Error setting RXON_ASSOC " "configuration (%d).\n", rc); return rc; } memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); /* * We do not commit tx power settings while channel changing, * do it now if tx power changed. */ il_set_tx_power(il, il->tx_power_next, false); return 0; } /* If we are currently associated and the new config requires * an RXON_ASSOC and the new config wants the associated mask enabled, * we must clear the associated from the active configuration * before we apply the new config */ if (il_is_associated(il) && new_assoc) { D_INFO("Toggling asso
/** * il3945_send_tx_power - fill in Tx Power command with gain settings * * Configures power settings for all rates for the current channel, * using values from channel info struct, and send to NIC */ static int il3945_send_tx_power(struct il_priv *il) { int rate_idx, i; const struct il_channel_info *ch_info = NULL; struct il3945_txpowertable_cmd txpower = { .channel = il->active.channel, }; u16 chan; if (WARN_ONCE (test_bit(S_SCAN_HW, &il->status), "TX Power requested while scanning!\n")) return -EAGAIN; chan = le16_to_cpu(il->active.channel); txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1; ch_info = il_get_channel_info(il, il->band, chan); if (!ch_info) { IL_ERR("Failed to get channel info for channel %d [%d]\n", chan, il->band); return -EINVAL; } if (!il_is_channel_valid(ch_info)) { D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n"); return 0; } /* fill cmd with power settings for all rates for current channel */ /* Fill OFDM rate */ for (rate_idx = IL_FIRST_OFDM_RATE, i = 0; rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) { txpower.power[i].tpc = ch_info->power_info[i].tpc; txpower.power[i].rate = il3945_rates[rate_idx].plcp; D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", le16_to_cpu(txpower.channel), txpower.band, txpower.power[i].tpc.tx_gain, txpower.power[i].tpc.dsp_atten, txpower.power[i].rate); } /* Fill CCK rates */ for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE; rate_idx++, i++) { txpower.power[i].tpc = ch_info->power_info[i].tpc; txpower.power[i].rate = il3945_rates[rate_idx].plcp; D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", le16_to_cpu(txpower.channel), txpower.band, txpower.power[i].tpc.tx_gain, txpower.power[i].tpc.dsp_atten, txpower.power[i].rate); } return il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(struct il3945_txpowertable_cmd), &txpower); } /** * il3945_hw_reg_set_new_power - Configures power tables at new levels * @ch_info: Channel to update. Uses power_info.requested_power. * * Replace requested_power and base_power_idx ch_info fields for * one channel. * * Called if user or spectrum management changes power preferences. * Takes into account h/w and modulation limitations (clip power). * * This does *not* send anything to NIC, just sets up ch_info for one channel. * * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to * properly fill out the scan powers, and actual h/w gain settings, * and send changes to NIC */ static int il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info) { struct il3945_channel_power_info *power_info; int power_changed = 0; int i; const s8 *clip_pwrs; int power; /* Get this chnlgrp's rate-to-max/clip-powers table */ clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers; /* Get this channel's rate-to-current-power settings table */ power_info = ch_info->power_info; /* update OFDM Txpower settings */ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) { int delta_idx; /* limit new power to be no more than h/w capability */ power = min(ch_info->curr_txpow, clip_pwrs[i]); if (power == power_info->requested_power) continue; /* find difference between old and new requested powers, * update base (non-temp-compensated) power idx */ delta_idx = (power - power_info->requested_power) * 2; power_info->base_power_idx -= delta_idx; /* save new requested power value */ power_info->requested_power = power; power_changed = 1; } /* update CCK Txpower settings, based on OFDM 12M setting ... * ... 
 *    all CCK power settings for a given channel are the *same*. */
	if (power_changed) {
		power =
		    ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
		    IL_CCK_FROM_OFDM_POWER_DIFF;

		/* do all CCK rates' il3945_channel_power_info structures */
		for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
			power_info->requested_power = power;
			power_info->base_power_idx =
			    ch_info->power_info[RATE_12M_IDX_TBL].
			    base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
			++power_info;
		}
	}

	return 0;
}

/**
 * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
 *
 * NOTE: Returned power limit may be less (but not more) than requested,
 *	 based strictly on regulatory (eeprom and spectrum mgt) limitations
 *	 (no consideration for h/w clipping limitations).
 */
static int
il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
{
	s8 max_power;

#if 0
	/* if we're using TGd limits, use lower of TGd or EEPROM */
	if (ch_info->tgd_data.max_power != 0)
		max_power =
		    min(ch_info->tgd_data.max_power,
			ch_info->eeprom.max_power_avg);

	/* else just use EEPROM limits */
	else
#endif
		max_power = ch_info->eeprom.max_power_avg;

	return min(max_power, ch_info->max_power_avg);
}
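/*
 * Standalone illustration of the two computations above: the per-rate
 * clamp-and-reindex in il3945_hw_reg_set_new_power() (the "* 2" suggests
 * the gain table steps in half-dB units -- an inference, not a statement
 * from the source) and the min() of EEPROM vs. regulatory limits in
 * il3945_hw_reg_get_ch_txpower_limit().  All values are made up.
 */
#include <stdio.h>

int main(void)
{
	int curr_txpow = 14, clip_pwr = 12;	/* dBm, illustrative */
	int requested_power = 10, base_power_idx = 50;

	int power = curr_txpow < clip_pwr ? curr_txpow : clip_pwr;
	base_power_idx -= (power - requested_power) * 2;	/* 2 idx steps per dBm */
	printf("new requested=%d dBm, base_power_idx=%d\n",
	       power, base_power_idx);

	int eeprom_max = 16, regulatory_max = 14;
	printf("channel limit=%d dBm\n",
	       eeprom_max < regulatory_max ? eeprom_max : regulatory_max);
	return 0;
}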
/** * il3945_disable_events - Disable selected events in uCode event log * * Disable an event by writing "1"s into "disable" * bitmap in SRAM. Bit position corresponds to Event # (id/type). * Default values of 0 enable uCode events to be logged. * Use for only special debugging. This function is just a placeholder as-is, * you'll need to provide the special bits! ... * ... and set IL_EVT_DISABLE to 1. */ void il3945_disable_events(struct il_priv *il) { int i; u32 base; /* SRAM address of event log header */ u32 disable_ptr; /* SRAM address of event-disable bitmap array */ u32 array_size; /* # of u32 entries in array */ static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = { 0x00000000, /* 31 - 0 Event id numbers */ 0x00000000, /* 63 - 32 */ 0x00000000, /* 95 - 64 */ 0x00000000, /* 127 - 96 */ 0x00000000, /* 159 - 128 */ 0x00000000, /* 191 - 160 */ 0x00000000, /* 223 - 192 */ 0x00000000, /* 255 - 224 */ 0x00000000, /* 287 - 256 */ 0x00000000, /* 319 - 288 */ 0x00000000, /* 351 - 320 */ 0x00000000, /* 383 - 352 */ 0x00000000, /* 415 - 384 */ 0x00000000, /* 447 - 416 */ 0x00000000, /* 479 - 448 */ 0x00000000, /* 511 - 480 */ 0x00000000, /* 543 - 512 */ 0x00000000, /* 575 - 544 */ 0x00000000, /* 607 - 576 */ 0x00000000, /* 639 - 608 */ 0x00000000, /* 671 - 640 */ 0x00000000, /* 703 - 672 */ 0x00000000, /* 735 - 704 */ 0x00000000, /* 767 - 736 */ 0x00000000, /* 799 - 768 */ 0x00000000, /* 831 - 800 */ 0x00000000, /* 863 - 832 */ 0x00000000, /* 895 - 864 */ 0x00000000, /* 927 - 896 */ 0x00000000, /* 959 - 928 */ 0x00000000, /* 991 - 960 */ 0x00000000, /* 1023 - 992 */ 0x00000000, /* 1055 - 1024 */ 0x00000000, /* 1087 - 1056 */ 0x00000000, /* 1119 - 1088 */ 0x00000000, /* 1151 - 1120 */ 0x00000000, /* 1183 - 1152 */ 0x00000000, /* 1215 - 1184 */ 0x00000000, /* 1247 - 1216 */ 0x00000000, /* 1279 - 1248 */ 0x00000000, /* 1311 - 1280 */ 0x00000000, /* 1343 - 1312 */ 0x00000000, /* 1375 - 1344 */ 0x00000000, /* 1407 - 1376 */ 0x00000000, /* 1439 - 1408 */ 0x00000000, /* 1471 - 1440 */ 0x00000000, /* 1503 - 1472 */ }; base = le32_to_cpu(il->card_alive.log_event_table_ptr); if (!il3945_hw_valid_rtc_data_addr(base)) { IL_ERR("Invalid event log pointer 0x%08X\n", base); return; } disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32))); array_size = il_read_targ_mem(il, base + (5 * sizeof(u32))); if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) { D_INFO("Disabling selected uCode log events at 0x%x\n", disable_ptr); for (i = 0; i < IL_EVT_DISABLE_SIZE; i++) il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)), evt_disable[i]); } else { D_INFO("Selected uCode log events may be disabled\n"); D_INFO(" by writing \"1\"s into disable bitmap\n"); D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr, array_size); } }
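/*
 * The evt_disable[] table above is a plain bitmap: event id N lives in
 * 32-bit word N / 32 at bit N % 32, and writing a 1 there suppresses that
 * event in the uCode log.  A standalone sketch of flipping one event off
 * (the event id is arbitrary):
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_EVT_DISABLE_SIZE 47	/* ids 0..1503 -> 47 words of 32 bits */

int main(void)
{
	uint32_t evt_disable[DEMO_EVT_DISABLE_SIZE] = { 0 };
	unsigned int event_id = 100;	/* arbitrary event number */

	evt_disable[event_id / 32] |= 1u << (event_id % 32);
	printf("word %u = 0x%08x\n", event_id / 32, evt_disable[event_id / 32]);
	return 0;
}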
static ssize_t
il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
			loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64;	/* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");
	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 il->isr_stats.hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 il->isr_stats.sw);
	if (il->isr_stats.sw || il->isr_stats.hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 il->isr_stats.err_code);
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 il->isr_stats.sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 il->isr_stats.alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n",
			 il->isr_stats.rfkill);
	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 il->isr_stats.ctkill);
	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 il->isr_stats.wakeup);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", il->isr_stats.rx);
	for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
		if (il->isr_stats.handlers[cnt] > 0)
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tRx handler[%36s]:\t\t %u\n",
					 il_get_cmd_string(cnt),
					 il->isr_stats.handlers[cnt]);
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 il->isr_stats.tx);
	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 il->isr_stats.unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
ssize_t il3945_ucode_general_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300; ssize_t ret; struct iwl39_stats_general *general, *accum_general; struct iwl39_stats_general *delta_general, *max_general; struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* * The statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ general = &il->_3945.stats.general; dbg = &il->_3945.stats.general.dbg; div = &il->_3945.stats.general.div; accum_general = &il->_3945.accum_stats.general; delta_general = &il->_3945.delta_stats.general; max_general = &il->_3945.max_delta.general; accum_dbg = &il->_3945.accum_stats.general.dbg; delta_dbg = &il->_3945.delta_stats.general.dbg; max_dbg = &il->_3945.max_delta.general.dbg; accum_div = &il->_3945.accum_stats.general.div; delta_div = &il->_3945.delta_stats.general.div; max_div = &il->_3945.max_delta.general.div; pos += il3945_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_General:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "burst_check:", le32_to_cpu(dbg->burst_check), accum_dbg->burst_check, delta_dbg->burst_check, max_dbg->burst_check); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "burst_count:", le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, delta_dbg->burst_count, max_dbg->burst_count); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sleep_time:", le32_to_cpu(general->sleep_time), accum_general->sleep_time, delta_general->sleep_time, max_general->sleep_time); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "slots_out:", le32_to_cpu(general->slots_out), accum_general->slots_out, delta_general->slots_out, max_general->slots_out); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "slots_idle:", le32_to_cpu(general->slots_idle), accum_general->slots_idle, delta_general->slots_idle, max_general->slots_idle); pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", le32_to_cpu(general->ttl_timestamp)); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "tx_on_a:", le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, delta_div->tx_on_a, max_div->tx_on_a); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "tx_on_b:", le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, delta_div->tx_on_b, max_div->tx_on_b); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "exec_time:", le32_to_cpu(div->exec_time), accum_div->exec_time, delta_div->exec_time, max_div->exec_time); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "probe_time:", le32_to_cpu(div->probe_time), accum_div->probe_time, delta_div->probe_time, max_div->probe_time); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }
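/*
 * The current/accumulative/delta/max columns in the three uCode stats
 * dumps above are maintained per counter when a stats notification
 * arrives, along the lines of: delta is the change since the previous
 * report, accum keeps a running total, and max tracks the largest delta
 * seen.  Illustrative sketch only; the driver's own accumulation helper
 * also handles counter wraparound.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_counter {
	uint32_t prev, accum, delta, max;
};

static void demo_update(struct demo_counter *c, uint32_t current)
{
	c->delta = current - c->prev;
	c->accum += c->delta;
	if (c->delta > c->max)
		c->max = c->delta;
	c->prev = current;
}

int main(void)
{
	struct demo_counter burst_check = { 0 };

	demo_update(&burst_check, 10);
	demo_update(&burst_check, 25);
	printf("accum=%u delta=%u max=%u\n",
	       burst_check.accum, burst_check.delta, burst_check.max);
	return 0;
}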
ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = sizeof(struct iwl39_stats_rx_phy) * 40 + sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400; ssize_t ret; struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; struct iwl39_stats_rx_non_phy *general, *accum_general; struct iwl39_stats_rx_non_phy *delta_general, *max_general; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* * The statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ ofdm = &il->_3945.stats.rx.ofdm; cck = &il->_3945.stats.rx.cck; general = &il->_3945.stats.rx.general; accum_ofdm = &il->_3945.accum_stats.rx.ofdm; accum_cck = &il->_3945.accum_stats.rx.cck; accum_general = &il->_3945.accum_stats.rx.general; delta_ofdm = &il->_3945.delta_stats.rx.ofdm; delta_cck = &il->_3945.delta_stats.rx.cck; delta_general = &il->_3945.delta_stats.rx.general; max_ofdm = &il->_3945.max_delta.rx.ofdm; max_cck = &il->_3945.max_delta.rx.cck; max_general = &il->_3945.max_delta.rx.general; pos += il3945_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - OFDM:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt, delta_ofdm->ina_cnt, max_ofdm->ina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_cnt:", le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, delta_ofdm->fina_cnt, max_ofdm->fina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "plcp_err:", le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, delta_ofdm->plcp_err, max_ofdm->plcp_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_err:", le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, delta_ofdm->crc32_err, max_ofdm->crc32_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "overrun_err:", le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err, delta_ofdm->overrun_err, max_ofdm->overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", le32_to_cpu(ofdm->early_overrun_err), accum_ofdm->early_overrun_err, delta_ofdm->early_overrun_err, max_ofdm->early_overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_good:", le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good, delta_ofdm->crc32_good, max_ofdm->crc32_good); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", le32_to_cpu(ofdm->false_alarm_cnt), accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt, max_ofdm->false_alarm_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", le32_to_cpu(ofdm->fina_sync_err_cnt), accum_ofdm->fina_sync_err_cnt, delta_ofdm->fina_sync_err_cnt, max_ofdm->fina_sync_err_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_timeout:", 
le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, max_ofdm->fina_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", le32_to_cpu(ofdm->unresponded_rts), accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts, max_ofdm->unresponded_rts); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "rxe_frame_lmt_ovrun:", le32_to_cpu(ofdm->rxe_frame_limit_overrun), accum_ofdm->rxe_frame_limit_overrun, delta_ofdm->rxe_frame_limit_overrun, max_ofdm->rxe_frame_limit_overrun); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - CCK:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "ina_cnt:", le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, delta_cck->ina_cnt, max_cck->ina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_cnt:", le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, delta_cck->fina_cnt, max_cck->fina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "plcp_err:", le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, delta_cck->plcp_err, max_cck->plcp_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_err:", le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, delta_cck->crc32_err, max_cck->crc32_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "overrun_err:", le32_to_cpu(cck->overrun_err), accum_cck->overrun_err, delta_cck->overrun_err, max_cck->overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", le32_to_cpu(cck->early_overrun_err), accum_cck->early_overrun_err, delta_cck->early_overrun_err, max_cck->early_overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_good:", le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, delta_cck->crc32_good, max_cck->crc32_good); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", le32_to_cpu(cck->false_alarm_cnt), accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", le32_to_cpu(cck->fina_sync_err_cnt), accum_cck->fina_sync_err_cnt, delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout, delta_cck->sfd_timeout, max_cck->sfd_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_timeout:", le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout, delta_cck->fina_timeout, max_cck->fina_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", le32_to_cpu(cck->unresponded_rts), accum_cck->unresponded_rts, delta_cck->unresponded_rts, max_cck->unresponded_rts); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "rxe_frame_lmt_ovrun:", 
le32_to_cpu(cck->rxe_frame_limit_overrun), accum_cck->rxe_frame_limit_overrun, delta_cck->rxe_frame_limit_overrun, max_cck->rxe_frame_limit_overrun); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - GENERAL:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bogus_cts:", le32_to_cpu(general->bogus_cts), accum_general->bogus_cts, delta_general->bogus_cts, max_general->bogus_cts); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bogus_ack:", le32_to_cpu(general->bogus_ack), accum_general->bogus_ack, delta_general->bogus_ack, max_general->bogus_ack); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:", le32_to_cpu(general->non_bssid_frames), accum_general->non_bssid_frames, delta_general->non_bssid_frames, max_general->non_bssid_frames); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "filtered_frames:", le32_to_cpu(general->filtered_frames), accum_general->filtered_frames, delta_general->filtered_frames, max_general->filtered_frames); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "non_channel_beacons:", le32_to_cpu(general->non_channel_beacons), accum_general->non_channel_beacons, delta_general->non_channel_beacons, max_general->non_channel_beacons); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }
static ssize_t il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; int cnt = 0; char *buf; int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100; ssize_t ret; struct il_sensitivity_data *data; data = &il->sensitivity_data; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", data->auto_corr_ofdm); pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n", data->auto_corr_ofdm_mrc); pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", data->auto_corr_ofdm_x1); pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n", data->auto_corr_ofdm_mrc_x1); pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", data->auto_corr_cck); pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", data->auto_corr_cck_mrc); pos += scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_ofdm:\t\t %u\n", data->last_bad_plcp_cnt_ofdm); pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", data->last_fa_cnt_ofdm); pos += scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n", data->last_bad_plcp_cnt_cck); pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", data->last_fa_cnt_cck); pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", data->nrg_curr_state); pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", data->nrg_prev_state); pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); for (cnt = 0; cnt < 10; cnt++) { pos += scnprintf(buf + pos, bufsz - pos, " %u", data->nrg_value[cnt]); } pos += scnprintf(buf + pos, bufsz - pos, "\n"); pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { pos += scnprintf(buf + pos, bufsz - pos, " %u", data->nrg_silence_rssi[cnt]); } pos += scnprintf(buf + pos, bufsz - pos, "\n"); pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", data->nrg_silence_ref); pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", data->nrg_energy_idx); pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", data->nrg_silence_idx); pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", data->nrg_th_cck); pos += scnprintf(buf + pos, bufsz - pos, "nrg_auto_corr_silence_diff:\t %u\n", data->nrg_auto_corr_silence_diff); pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", data->num_in_cck_no_fa); pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", data->nrg_th_ofdm); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }
ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250; ssize_t ret; struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* * The statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ tx = &il->_3945.stats.tx; accum_tx = &il->_3945.accum_stats.tx; delta_tx = &il->_3945.delta_stats.tx; max_tx = &il->_3945.max_delta.tx; pos += il3945_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Tx:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "preamble:", le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, delta_tx->preamble_cnt, max_tx->preamble_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:", le32_to_cpu(tx->rx_detected_cnt), accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:", le32_to_cpu(tx->bt_prio_defer_cnt), accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt, max_tx->bt_prio_defer_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:", le32_to_cpu(tx->bt_prio_kill_cnt), accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt, max_tx->bt_prio_kill_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:", le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt, delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "cts_timeout:", le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, delta_tx->cts_timeout, max_tx->cts_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "ack_timeout:", le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, delta_tx->ack_timeout, max_tx->ack_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:", le32_to_cpu(tx->expected_ack_cnt), accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt, max_tx->expected_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:", le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; }