Code example #1
File: if_iwm_scan.c  Project: waddlesplash/haiku
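Here fw_has_capa() is used defensively: if the firmware advertises UMAC scan support, an LMAC scan-complete notification can only be a firmware mistake, so the handler warns and drops it.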
void
iwm_mvm_rx_lmac_scan_complete_notif(struct iwm_softc *sc,
    struct iwm_rx_packet *pkt)
{
	struct iwm_periodic_scan_complete *scan_notif = (void *)pkt->data;

	/* If this happens, the firmware has mistakenly sent an LMAC
	 * notification during UMAC scans -- warn and ignore it.
	 */
	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		device_printf(sc->sc_dev,
		    "%s: Mistakenly got LMAC notification during UMAC scan\n",
		    __func__);
		return;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "Regular scan %s, EBS status %s (FW)\n",
	    iwm_mvm_offload_status_str(scan_notif->status),
	    iwm_mvm_ebs_status_str(scan_notif->ebs_status));

	sc->last_ebs_successful =
			scan_notif->ebs_status == IWM_SCAN_EBS_SUCCESS ||
			scan_notif->ebs_status == IWM_SCAN_EBS_INACTIVE;

}
Code example #2
File: if_iwm_scan.c  Project: waddlesplash/haiku
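The simplest pattern on this page: a one-line predicate that requires an RRM scan whenever the firmware advertises DS-parameter-set IE support.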
static inline boolean_t
iwm_mvm_rrm_scan_needed(struct iwm_softc *sc)
{
	/* require rrm scan whenever the fw supports it */
	return fw_has_capa(&sc->ucode_capa,
			   IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
}
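Since every example on this page revolves around fw_has_capa(), a minimal sketch of the helper itself is worth having in front of you. In the Linux iwlwifi tree it is a one-line inline in iwl-fw-file.h that tests a bit of the TLV-derived capability bitmap (the iwm port used in examples #1 and #2 carries its own equivalent); the typedef and field names below should be treated as approximate and verified against the tree you are reading:

/* Minimal sketch of fw_has_capa(), assuming the iwl-fw-file.h
 * definitions; verify the typedef and bitmap field names against
 * the actual tree.
 */
static inline bool
fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
	    iwl_ucode_tlv_capa_t capa)
{
	return test_bit((__force long)capa, capabilities->_capa);
}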
Code example #3
File: nvm.c  Project: asmalldev/linux
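iwl_parse_nvm_sections() first verifies that the mandatory OTP/NVM sections are present (the required set differs for the 8000 family), then hands the raw little-endian sections to iwl_parse_nvm_data(); fw_has_capa() decides whether LAR (location-aware regulatory) is enabled.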
static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
	struct iwl_nvm_section *sections = mvm->nvm_sections;
	const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
	bool lar_enabled;

	/* Checking for required sections */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else {
		/* SW and REGULATORY sections are mandatory */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
			IWL_ERR(mvm,
				"Can't parse empty family 8000 OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			IWL_ERR(mvm,
				"Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
			IWL_ERR(mvm,
				"Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	}

	if (WARN_ON(!mvm->cfg))
		return NULL;

	hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
	sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
	calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
	mac_override =
		(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;

	lar_enabled = !iwlwifi_mod_params.lar_disable &&
		      fw_has_capa(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);

	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
				  regulatory, mac_override, phy_sku,
				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
				  lar_enabled);
}
Code example #4
File: tt.c  Project: Lyude/linux
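A capability-based fallback: without IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE the driver sends the basic DTS trigger command, with it the extended one.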
static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dts_measurement_cmd cmd = {
		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
	};
	struct iwl_ext_dts_measurement_cmd extcmd = {
		.control_mode = cpu_to_le32(DTS_AUTOMATIC),
	};
	u32 cmdid;

	cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
			   PHY_OPS_GROUP, 0);

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
		return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);

	return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
}
Code example #5
File: fw.c  Project: BORETS24/common.git-android-4.4
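iwl_mvm_up() is the full device bring-up path; its fw_has_capa() check near the end sends the scan configuration only when the firmware does UMAC-based scanning.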
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firmware (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			goto error;
	}

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_trans_stop_device(mvm->trans);
	return ret;
}
Code example #6
File: fw.c  Project: 020gzh/linux
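A longer excerpt from fw.c covering the paging commands, the ALIVE notification handler, ucode load, the INIT flow, and the shared-memory query; fw_has_capa() appears in iwl_mvm_load_ucode_wait_alive() to decide whether a separate usniffer image must be loaded.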
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism. The driver
	 * configures the paging flow only once; the CPU2 paging image is
	 * included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When dma is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location where the pages are stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * Abort after reading the NVM in case RF kill is on; we will complete
	 * the init sequence later, when RF kill switches off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
			MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}
Code example #7
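iwl_mvm_fw_error_dump() sizes, allocates, and fills a firmware coredump; fw_has_capa() gates the room reserved for the UMAC internal TX FIFOs.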
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
	struct iwl_fw_error_dump_file *dump_file;
	struct iwl_fw_error_dump_data *dump_data;
	struct iwl_fw_error_dump_info *dump_info;
	struct iwl_fw_error_dump_mem *dump_mem;
	struct iwl_fw_error_dump_trigger_desc *dump_trig;
	struct iwl_mvm_dump_ptrs *fw_error_dump;
	u32 sram_len, sram_ofs;
	struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
		mvm->fw->dbg_mem_tlv;
	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
	u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
	u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
	bool monitor_dump_only = false;
	int i;

	if (!IWL_MVM_COLLECT_FW_ERR_DUMP &&
	    !mvm->trans->dbg_dest_tlv)
		return;

	lockdep_assert_held(&mvm->mutex);

	/* there's no point in fw dump if the bus is dead */
	if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
		IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
		goto out;
	}

	if (mvm->fw_dump_trig &&
	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
		monitor_dump_only = true;

	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
	if (!fw_error_dump)
		goto out;

	/* SRAM - include stack CCM if driver knows the values for it */
	if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
		const struct fw_img *img;

		img = &mvm->fw->img[mvm->cur_ucode];
		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
	} else {
		sram_ofs = mvm->cfg->dccm_offset;
		sram_len = mvm->cfg->dccm_len;
	}

	/* reading RXF/TXF sizes */
	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;

		fifo_data_len = 0;

		/* Count RXF size */
		for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
			if (!mem_cfg->rxfifo_size[i])
				continue;

			/* Add header info */
			fifo_data_len += mem_cfg->rxfifo_size[i] +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}

		for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
			if (!mem_cfg->txfifo_size[i])
				continue;

			/* Add header info */
			fifo_data_len += mem_cfg->txfifo_size[i] +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}

		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
			for (i = 0;
			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
			     i++) {
				if (!mem_cfg->internal_txfifo_size[i])
					continue;

				/* Add header info */
				fifo_data_len +=
					mem_cfg->internal_txfifo_size[i] +
					sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_fifo);
			}
		}

		/* Make room for PRPH registers */
		for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
			/* The range includes both boundaries */
			int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
				iwl_prph_dump_addr[i].start + 4;

			prph_len += sizeof(*dump_data) +
				sizeof(struct iwl_fw_error_dump_prph) +
				num_bytes_in_chunk;
		}

		if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
	}

	file_len = sizeof(*dump_file) +
		   sizeof(*dump_data) * 2 +
		   fifo_data_len +
		   prph_len +
		   radio_len +
		   sizeof(*dump_info);

	/* Make room for the SMEM, if it exists */
	if (smem_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;

	/* Make room for the secondary SRAM, if it exists */
	if (sram2_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;

	/* Make room for MEM segments */
	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
		if (fw_dbg_mem[i])
			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
				le32_to_cpu(fw_dbg_mem[i]->len);
	}

	/* Make room for fw's virtual image pages, if it exists */
	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
	    mvm->fw_paging_db[0].fw_paging_block)
		file_len += mvm->num_of_paging_blk *
			(sizeof(*dump_data) +
			 sizeof(struct iwl_fw_error_dump_paging) +
			 PAGING_BLOCK_SIZE);

	/* If we only want a monitor dump, reset the file length */
	if (monitor_dump_only) {
		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
			   sizeof(*dump_info);
	}

	/*
	 * In the 8000 HW family B-step, include the ICCM (which resides
	 * separately)
	 */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
			    IWL8260_ICCM_LEN;

	if (mvm->fw_dump_desc)
		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
			    mvm->fw_dump_desc->len;

	if (!mvm->fw->dbg_dynamic_mem)
		file_len += sram_len + sizeof(*dump_mem);

	dump_file = vzalloc(file_len);
	if (!dump_file) {
		kfree(fw_error_dump);
		goto out;
	}

	fw_error_dump->op_mode_ptr = dump_file;

	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
	dump_data = (void *)dump_file->data;

	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
	dump_data->len = cpu_to_le32(sizeof(*dump_info));
	dump_info = (void *)dump_data->data;
	dump_info->device_family =
		mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
	memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
	       sizeof(dump_info->fw_human_readable));
	strncpy(dump_info->dev_human_readable, mvm->cfg->name,
		sizeof(dump_info->dev_human_readable));
	strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
		sizeof(dump_info->bus_human_readable));

	dump_data = iwl_fw_error_next_data(dump_data);
	/* We only dump the FIFOs if the FW is in error state */
	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
		iwl_mvm_dump_fifos(mvm, &dump_data);
		if (radio_len)
			iwl_mvm_read_radio_reg(mvm, &dump_data);
	}

	if (mvm->fw_dump_desc) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
					     mvm->fw_dump_desc->len);
		dump_trig = (void *)dump_data->data;
		memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
		       sizeof(*dump_trig) + mvm->fw_dump_desc->len);

		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* In case we only want the monitor dump, skip to dumping transport data */
	if (monitor_dump_only)
		goto dump_trans_data;

	if (!mvm->fw->dbg_dynamic_mem) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(sram_ofs);
		iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
					 sram_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
		if (fw_dbg_mem[i]) {
			u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
			u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);

			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
			dump_data->len = cpu_to_le32(len +
					sizeof(*dump_mem));
			dump_mem = (void *)dump_data->data;
			dump_mem->type = fw_dbg_mem[i]->data_type;
			dump_mem->offset = cpu_to_le32(ofs);
			iwl_trans_read_mem_bytes(mvm->trans, ofs,
						 dump_mem->data,
						 len);
			dump_data = iwl_fw_error_next_data(dump_data);
		}
	}

	if (smem_len) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
		dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
					 dump_mem->data, smem_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	if (sram2_len) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
					 dump_mem->data, sram2_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
					     sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
					 dump_mem->data, IWL8260_ICCM_LEN);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* Dump fw's virtual image */
	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
	    mvm->fw_paging_db[0].fw_paging_block) {
		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
			struct iwl_fw_error_dump_paging *paging;
			struct page *pages =
				mvm->fw_paging_db[i].fw_paging_block;

			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			dump_data->len = cpu_to_le32(sizeof(*paging) +
						     PAGING_BLOCK_SIZE);
			paging = (void *)dump_data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data, page_address(pages),
			       PAGING_BLOCK_SIZE);
			dump_data = iwl_fw_error_next_data(dump_data);
		}
	}

	if (prph_len)
		iwl_dump_prph(mvm->trans, &dump_data);

dump_trans_data:
	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
						       mvm->fw_dump_trig);
	fw_error_dump->op_mode_len = file_len;
	if (fw_error_dump->trans_ptr)
		file_len += fw_error_dump->trans_ptr->len;
	dump_file->file_len = cpu_to_le32(file_len);

	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);

out:
	iwl_mvm_free_fw_dump_desc(mvm);
	mvm->fw_dump_trig = NULL;
	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
}
Code example #8
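iwl_mvm_dump_fifos() drains the RX and TX FIFOs through PRPH reads; the UMAC internal TXFs are dumped only when the extended shared-memory capability is set.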
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
			       struct iwl_fw_error_dump_data **dump_data)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	unsigned long flags;
	int i, j;

	if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
		return;

	/* Pull RXF data from all RXFs */
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
		/*
		 * Account for the additional offset that might be needed for
		 * the next RXF
		 */
		u32 offset_diff = RXF_DIFF_FROM_PREV * i;

		fifo_hdr = (void *)(*dump_data)->data;
		fifo_data = (void *)fifo_hdr->data;
		fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];

		/* No need to try to read the data if the length is 0 */
		if (fifo_len == 0)
			continue;

		/* Add a TLV for the RXF */
		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

		fifo_hdr->fifo_num = cpu_to_le32(i);
		fifo_hdr->available_bytes =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_D_SPACE +
							offset_diff));
		fifo_hdr->wr_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_WR_PTR +
							offset_diff));
		fifo_hdr->rd_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_RD_PTR +
							offset_diff));
		fifo_hdr->fence_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_FENCE_PTR +
							offset_diff));
		fifo_hdr->fence_mode =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_SET_FENCE_MODE +
							offset_diff));

		/* Lock fence */
		iwl_trans_write_prph(mvm->trans,
				     RXF_SET_FENCE_MODE + offset_diff, 0x1);
		/* Set fence pointer to the same place as the WR pointer */
		iwl_trans_write_prph(mvm->trans,
				     RXF_LD_WR2FENCE + offset_diff, 0x1);
		/* Set fence offset */
		iwl_trans_write_prph(mvm->trans,
				     RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
				     0x0);

		/* Read FIFO */
		fifo_len /= sizeof(u32); /* Size in DWORDS */
		for (j = 0; j < fifo_len; j++)
			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
							 RXF_FIFO_RD_FENCE_INC +
							 offset_diff);
		*dump_data = iwl_fw_error_next_data(*dump_data);
	}

	/* Pull TXF data from all TXFs */
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
		/* Mark the number of TXF we're pulling now */
		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);

		fifo_hdr = (void *)(*dump_data)->data;
		fifo_data = (void *)fifo_hdr->data;
		fifo_len = mvm->shared_mem_cfg.txfifo_size[i];

		/* No need to try to read the data if the length is 0 */
		if (fifo_len == 0)
			continue;

		/* Add a TLV for the FIFO */
		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

		fifo_hdr->fifo_num = cpu_to_le32(i);
		fifo_hdr->available_bytes =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_FIFO_ITEM_CNT));
		fifo_hdr->wr_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_WR_PTR));
		fifo_hdr->rd_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_RD_PTR));
		fifo_hdr->fence_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_FENCE_PTR));
		fifo_hdr->fence_mode =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_LOCK_FENCE));

		/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
		iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
				     TXF_WR_PTR);

		/* Dummy-read to advance the read pointer to the head */
		iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);

		/* Read FIFO */
		fifo_len /= sizeof(u32); /* Size in DWORDS */
		for (j = 0; j < fifo_len; j++)
			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
							  TXF_READ_MODIFY_DATA);
		*dump_data = iwl_fw_error_next_data(*dump_data);
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		/* Pull UMAC internal TXF data from all TXFs */
		for (i = 0;
		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
		     i++) {
			/* Mark the number of TXF we're pulling now */
			iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);

			fifo_hdr = (void *)(*dump_data)->data;
			fifo_data = (void *)fifo_hdr->data;
			fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];

			/* No need to try to read the data if the length is 0 */
			if (fifo_len == 0)
				continue;

			/* Add a TLV for the internal FIFOs */
			(*dump_data)->type =
				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
			(*dump_data)->len =
				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

			fifo_hdr->fifo_num = cpu_to_le32(i);
			fifo_hdr->available_bytes =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_FIFO_ITEM_CNT));
			fifo_hdr->wr_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_WR_PTR));
			fifo_hdr->rd_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_RD_PTR));
			fifo_hdr->fence_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_FENCE_PTR));
			fifo_hdr->fence_mode =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_LOCK_FENCE));

			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
			iwl_trans_write_prph(mvm->trans,
					     TXF_CPU2_READ_MODIFY_ADDR,
					     TXF_CPU2_WR_PTR);

			/* Dummy-read to advance the read pointer to head */
			iwl_trans_read_prph(mvm->trans,
					    TXF_CPU2_READ_MODIFY_DATA);

			/* Read FIFO */
			fifo_len /= sizeof(u32); /* Size in DWORDS */
			for (j = 0; j < fifo_len; j++)
				fifo_data[j] =
					iwl_trans_read_prph(mvm->trans,
							    TXF_CPU2_READ_MODIFY_DATA);
			*dump_data = iwl_fw_error_next_data(*dump_data);
		}
	}

	iwl_trans_release_nic_access(mvm->trans, &flags);
}
Code example #9
File: fw.c  Project: OSEC-pl/backports-iwlwifi
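iwl_mvm_sar_select_profile() shows version negotiation by capability: the REDUCE_TX_POWER_CMD payload length is chosen from the API and capability bits the firmware advertises.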
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	union {
		struct iwl_dev_tx_power_cmd v5;
		struct iwl_dev_tx_power_cmd_v4 v4;
	} cmd;
	int i, j, idx;
	int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
	int len;

	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
		     ACPI_SAR_TABLE_SIZE);

	cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v4);
	else
		len = sizeof(cmd.v4.v3);

	for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
		struct iwl_mvm_sar_profile *prof;

		/* don't allow SAR to be disabled (profile 0 means disable) */
		if (profs[i] == 0)
			return -EPERM;

		/* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
		if (profs[i] > ACPI_SAR_PROFILE_NUM)
			return -EINVAL;

		/* profiles go from 1 to 4, so decrement to access the array */
		prof = &mvm->sar_profiles[profs[i] - 1];

		/* if the profile is disabled, do nothing */
		if (!prof->enabled) {
			IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
					profs[i]);
			/* if one of the profiles is disabled, we fail all */
			return -ENOENT;
		}

		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
		for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
			idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
			cmd.v5.v3.per_chain_restriction[i][j] =
				cpu_to_le16(prof->table[idx]);
			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
					j, prof->table[idx]);
		}
	}

#ifdef CPTCFG_IWLMVM_VENDOR_CMDS
	mvm->sar_chain_a_profile = prof_a;
	mvm->sar_chain_b_profile = prof_b;
#endif
	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
Code example #10
File: fw.c  Project: OSEC-pl/backports-iwlwifi
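A newer (backports) variant of iwl_mvm_load_ucode_wait_alive(); the usniffer-image decision via fw_has_capa() is the same as in example #6.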
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { MVM_ALIVE };

	set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);

#ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
	/* let's force the timeout if required */
	if (unlikely(mvm->trans->dbg_cfg.fw_alive_timeout)) {
		IWL_INFO(mvm, "Forcing fw alive notification timeout\n");
		ret = -ETIMEDOUT;
	}
#endif
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
	clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);

	return 0;
}
Code example #11
File: fw.c  Project: OSEC-pl/backports-iwlwifi
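The backports version of iwl_mvm_up(), with fw_has_capa() checks for SOC latency support and for UMAC scan configuration.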
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_get_shared_mem_conf(&mvm->fwrt);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
	iwl_dnt_start(mvm->trans);
#endif

	mvm->fwrt.dump.conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg.dest_tlv)
		mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
	iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);

#ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS
	if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_LATENCY)) {
		struct iwl_fw_dbg_trigger_tlv *trig;
		struct iwl_fw_dbg_trigger_tx_latency *thrshold_trig;
		u32 thrshld;
		u32 vif;
		u32 iface = 0;
		u16 tid;
		u16 mode;
		u32 window;

		trig = iwl_fw_dbg_get_trigger(mvm->fw,
					      FW_DBG_TRIGGER_TX_LATENCY);
		vif = le32_to_cpu(trig->vif_type);
		if (vif == IWL_FW_DBG_CONF_VIF_ANY) {
			iface = BIT(IEEE80211_TX_LATENCY_BSS);
			iface |= BIT(IEEE80211_TX_LATENCY_P2P);
		} else if (vif <= IWL_FW_DBG_CONF_VIF_AP) {
			iface = BIT(IEEE80211_TX_LATENCY_BSS);
		} else {
			iface = BIT(IEEE80211_TX_LATENCY_P2P);
		}
		thrshold_trig = (void *)trig->data;
		thrshld = le32_to_cpu(thrshold_trig->thrshold);
		tid = le16_to_cpu(thrshold_trig->tid_bitmap);
		mode = le16_to_cpu(thrshold_trig->mode);
		window = le32_to_cpu(thrshold_trig->window);
		IWL_DEBUG_INFO(mvm,
			       "Tx latency trigger cfg: threshold = %u, tid = 0x%x, mode = 0x%x, window = %u vif = 0x%x\n",
			       thrshld, tid, mode, window, iface);
		ieee80211_tx_lat_thrshld_cfg(mvm->hw, thrshld,
					     tid, window, mode, iface);
	}
#endif

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	if (!iwl_mvm_has_unified_ucode(mvm)) {
		/* Send phy db control command and then phy db calibration */
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;

		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
		ret = iwl_set_soc_latency(mvm);
		if (ret)
			goto error;
	}

	/* Init RSS configuration */
	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
		ret = iwl_configure_rxq(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
				ret);
			goto error;
		}
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	ret = iwl_mvm_send_dqa_cmd(mvm);
	if (ret)
		goto error;

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* In order to hand ct-kill and TX backoff responsibility to the
		 * FW, we need to send an empty temperature reporting cmd
		 * during init time.
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */

	/*
	 * In case there is no budget from BIOS / Platform NVM the default
	 * budget should be 2000mW (cooling state 0).
	 */
	if (iwl_mvm_is_ctdp_supported(mvm)) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

#ifdef CPTCFG_IWLWIFI_LTE_COEX
	iwl_mvm_send_lte_commands(mvm);
#endif

#ifdef CPTCFG_IWLMVM_VENDOR_CMDS
	/* set_mode must be IWL_TX_POWER_MODE_SET_DEVICE if this was
	 * ever initialized.
	 */
	if (le32_to_cpu(mvm->txp_cmd.v5.v3.set_mode) ==
	    IWL_TX_POWER_MODE_SET_DEVICE) {
		int len;

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
			len = sizeof(mvm->txp_cmd.v5);
		else if (fw_has_capa(&mvm->fw->ucode_capa,
				     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
			len = sizeof(mvm->txp_cmd.v4);
		else
			len = sizeof(mvm->txp_cmd.v4.v3);

		if (iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
					 len, &mvm->txp_cmd))
			IWL_ERR(mvm, "failed to update TX power\n");
	}
#endif

#ifdef CPTCFG_IWLWIFI_FRQ_MGR
	iwl_mvm_fm_notify_current_dcdc();
#endif

	ret = iwl_mvm_sar_init(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_sar_geo_init(mvm);
	if (ret)
		goto error;

	iwl_mvm_leds_sync(mvm);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);
	return ret;
}
Code example #12
File: fw-dbg.c  Project: rldleblanc/linux
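A later iwl_mvm_dump_fifos() that delegates per-FIFO work to iwl_mvm_dump_rxf() and iwl_mvm_dump_txf(); the capability check around the UMAC internal TXFs is unchanged.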
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
			       struct iwl_fw_error_dump_data **dump_data)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
	u32 *fifo_data;
	u32 fifo_len;
	unsigned long flags;
	int i, j;

	if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
		return;

	/* Pull RXF1 */
	iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
	/* Pull RXF2 */
	iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
			 RXF_DIFF_FROM_PREV, 1);
	/* Pull LMAC2 RXF1 */
	if (mvm->smem_cfg.num_lmacs > 1)
		iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
				 LMAC2_PRPH_OFFSET, 2);

	/* Pull TXF data from LMAC1 */
	for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
		/* Mark the number of TXF we're pulling now */
		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
		iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
				 0, i);
	}

	/* Pull TXF data from LMAC2 */
	if (mvm->smem_cfg.num_lmacs > 1) {
		for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
			/* Mark the number of TXF we're pulling now */
			iwl_trans_write_prph(mvm->trans,
					     TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
					     i);
			iwl_mvm_dump_txf(mvm, dump_data,
					 cfg->lmac[1].txfifo_size[i],
					 LMAC2_PRPH_OFFSET,
					 i + cfg->num_txfifo_entries);
		}
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		/* Pull UMAC internal TXF data from all TXFs */
		for (i = 0;
		     i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
		     i++) {
			fifo_hdr = (void *)(*dump_data)->data;
			fifo_data = (void *)fifo_hdr->data;
			fifo_len = mvm->smem_cfg.internal_txfifo_size[i];

			/* No need to try to read the data if the length is 0 */
			if (fifo_len == 0)
				continue;

			/* Add a TLV for the internal FIFOs */
			(*dump_data)->type =
				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
			(*dump_data)->len =
				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

			fifo_hdr->fifo_num = cpu_to_le32(i);

			/* Mark the number of TXF we're pulling now */
			iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
				mvm->smem_cfg.num_txfifo_entries);

			fifo_hdr->available_bytes =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_FIFO_ITEM_CNT));
			fifo_hdr->wr_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_WR_PTR));
			fifo_hdr->rd_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_RD_PTR));
			fifo_hdr->fence_ptr =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_FENCE_PTR));
			fifo_hdr->fence_mode =
				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
								TXF_CPU2_LOCK_FENCE));

			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
			iwl_trans_write_prph(mvm->trans,
					     TXF_CPU2_READ_MODIFY_ADDR,
					     TXF_CPU2_WR_PTR);

			/* Dummy-read to advance the read pointer to head */
			iwl_trans_read_prph(mvm->trans,
					    TXF_CPU2_READ_MODIFY_DATA);

			/* Read FIFO */
			fifo_len /= sizeof(u32); /* Size in DWORDS */
			for (j = 0; j < fifo_len; j++)
				fifo_data[j] =
					iwl_trans_read_prph(mvm->trans,
							    TXF_CPU2_READ_MODIFY_DATA);
			*dump_data = iwl_fw_error_next_data(*dump_data);
		}
	}

	iwl_trans_release_nic_access(mvm->trans, &flags);
}
Code example #13
File: nvm.c  Project: asmalldev/linux
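iwl_mvm_update_mcc() sends MCC_UPDATE_CMD and copies out either the v1 or v2 response layout, chosen by IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2.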
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
		   enum iwl_mcc_source src_id)
{
	struct iwl_mcc_update_cmd mcc_update_cmd = {
		.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
		.source_id = (u8)src_id,
	};
	struct iwl_mcc_update_resp *resp_cp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = MCC_UPDATE_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &mcc_update_cmd },
	};

	int ret;
	u32 status;
	int resp_len, n_channels;
	u16 mcc;
	bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return ERR_PTR(-EOPNOTSUPP);

	cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
	if (!resp_v2)
		cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);

	IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
		      alpha2[0], alpha2[1], src_id);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ERR_PTR(ret);

	pkt = cmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;

		n_channels =  __le32_to_cpu(mcc_resp->n_channels);
		resp_len = sizeof(struct iwl_mcc_update_resp) +
			   n_channels * sizeof(__le32);
		resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
	} else {
		struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;

		n_channels =  __le32_to_cpu(mcc_resp_v1->n_channels);
		resp_len = sizeof(struct iwl_mcc_update_resp) +
			   n_channels * sizeof(__le32);
		resp_cp = kzalloc(resp_len, GFP_KERNEL);

		if (resp_cp) {
			resp_cp->status = mcc_resp_v1->status;
			resp_cp->mcc = mcc_resp_v1->mcc;
			resp_cp->cap = mcc_resp_v1->cap;
			resp_cp->source_id = mcc_resp_v1->source_id;
			resp_cp->n_channels = mcc_resp_v1->n_channels;
			memcpy(resp_cp->channels, mcc_resp_v1->channels,
			       n_channels * sizeof(__le32));
		}
	}

	if (!resp_cp) {
		ret = -ENOMEM;
		goto exit;
	}

	status = le32_to_cpu(resp_cp->status);

	mcc = le16_to_cpu(resp_cp->mcc);

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0) {
		mcc = 0x3030;  /* "00" - world */
		resp_cp->mcc = cpu_to_le16(mcc);
	}

	IWL_DEBUG_LAR(mvm,
		      "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
		      status, mcc, mcc >> 8, mcc & 0xff,
		      !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);

exit:
	iwl_free_resp(&cmd);
	if (ret)
		return ERR_PTR(ret);
	return resp_cp;
}

int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
	bool tlv_lar;
	bool nvm_lar;
	int retval;
	struct ieee80211_regdomain *regd;
	char mcc[3];

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
		nvm_lar = mvm->nvm_data->lar_enabled;
		if (tlv_lar != nvm_lar)
			IWL_INFO(mvm,
				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
				 tlv_lar ? "enabled" : "disabled",
				 nvm_lar ? "enabled" : "disabled");
	}

	if (!iwl_mvm_is_lar_supported(mvm))
		return 0;

	/*
	 * try to replay the last set MCC to FW. If it doesn't exist,
	 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
	 */
	retval = iwl_mvm_init_fw_regd(mvm);
	if (retval != -ENOENT)
		return retval;

	/*
	 * Driver regulatory hint for initial update; this also informs the
	 * firmware we support wifi location updates.
	 * Disallow scans that might crash the FW while the LAR regdomain
	 * is not set.
	 */
	mvm->lar_regdom_set = false;

	regd = iwl_mvm_get_current_regdomain(mvm, NULL);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
	    !iwl_get_bios_mcc(mvm->dev, mcc)) {
		kfree(regd);
		regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
					     MCC_SOURCE_BIOS, NULL);
		if (IS_ERR_OR_NULL(regd))
			return -EIO;
	}

	retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
	kfree(regd);
	return retval;
}
Code example #14
File: led.c  Project: Anjali05/linux
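The LED path: when the firmware supports LEDS_CMD the driver drives the LED with an asynchronous host command, otherwise it writes CSR_LED_REG directly.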
static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on)
{
	struct iwl_led_cmd led_cmd = {
		.status = cpu_to_le32(on),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(LONG_GROUP, LEDS_CMD),
		.len = { sizeof(led_cmd), },
		.data = { &led_cmd, },
		.flags = CMD_ASYNC,
	};
	int err;

	if (!iwl_mvm_firmware_running(mvm))
		return;

	err = iwl_mvm_send_cmd(mvm, &cmd);

	if (err)
		IWL_WARN(mvm, "LED command failed: %d\n", err);
}

static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on)
{
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
		iwl_mvm_send_led_fw_cmd(mvm, on);
		return;
	}

	iwl_write32(mvm->trans, CSR_LED_REG,
		    on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
}

static void iwl_led_brightness_set(struct led_classdev *led_cdev,
				   enum led_brightness brightness)
{
	struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);

	iwl_mvm_led_set(mvm, brightness > 0);
}

int iwl_mvm_leds_init(struct iwl_mvm *mvm)
{
	int mode = iwlwifi_mod_params.led_mode;
	int ret;

	switch (mode) {
	case IWL_LED_BLINK:
		IWL_ERR(mvm, "Blink led mode not supported, used default\n");
		/* fall through */
	case IWL_LED_DEFAULT:
	case IWL_LED_RF_STATE:
		mode = IWL_LED_RF_STATE;
		break;
	case IWL_LED_DISABLE:
		IWL_INFO(mvm, "Led disabled\n");
		return 0;
	default:
		return -EINVAL;
	}

	mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
				   wiphy_name(mvm->hw->wiphy));
	mvm->led.brightness_set = iwl_led_brightness_set;
	mvm->led.max_brightness = 1;

	if (mode == IWL_LED_RF_STATE)
		mvm->led.default_trigger =
			ieee80211_get_radio_led_name(mvm->hw);

	ret = led_classdev_register(mvm->trans->dev, &mvm->led);
	if (ret) {
		kfree(mvm->led.name);
		IWL_INFO(mvm, "Failed to enable led\n");
		return ret;
	}

	mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
	return 0;
}