Example #1
File: tt.c  Project: Lyude/linux
int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
{
	struct iwl_notification_wait wait_temp_notif;
	static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
					    DTS_MEASUREMENT_NOTIF_WIDE) };
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
				   temp_notif, ARRAY_SIZE(temp_notif),
				   iwl_mvm_temp_notif_wait, temp);

	ret = iwl_mvm_get_temp_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
		return ret;
	}

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
				    IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
	if (ret)
		IWL_ERR(mvm, "Getting the temperature timed out\n");

	return ret;
}
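
Every example on this page exercises the same iwl_init_notification_wait() lifecycle: register the waiter, send the command that should trigger the notification, then either block in iwl_wait_notification() or tear the waiter down with iwl_remove_notification() if the send failed. A minimal sketch of that pattern follows; the command ID, the send helper and the surrounding function are hypothetical and exist only to isolate the lifecycle:

/* Hypothetical caller showing the common init/send/wait-or-remove pattern.
 * SOME_NOTIF and send_triggering_cmd() are placeholders, not driver symbols.
 */
static int example_wait_for_notif(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait wait_entry;
	static const u16 notifs[] = { SOME_NOTIF };
	int ret;

	/* Register before sending so a fast firmware reply cannot be missed */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_entry,
				   notifs, ARRAY_SIZE(notifs),
				   NULL /* optional handler */, NULL);

	ret = send_triggering_cmd(mvm);		/* placeholder */
	if (ret) {
		/* The send failed: the waiter must be removed explicitly */
		iwl_remove_notification(&mvm->notif_wait, &wait_entry);
		return ret;
	}

	/* Blocks until the notification arrives or the timeout expires;
	 * the waiter is unregistered on both paths, so no remove is needed
	 * afterwards (none of the examples above call it after waiting).
	 */
	return iwl_wait_notification(&mvm->notif_wait, &wait_entry, HZ);
}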
Example #2
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;

	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
				   REPLY_WIPAN_DEACTIVATION_COMPLETE,
				   NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				CMD_SYNC, sizeof(*send), send);

	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		iwl_remove_notification(&priv->notif_wait, &disable_wait);
	} else {
		ret = iwl_wait_notification(&priv->notif_wait,
					    &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}
Example #3
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
	int ret;

	iwl_init_notification_wait(priv->shrd, &calib_wait,
				      CALIBRATION_COMPLETE_NOTIFICATION,
				      NULL, NULL);
	ret = iwl_init_alive_start(trans(priv));
	if (ret) {
		IWL_DEBUG_INFO(priv,
			"Error configuring init calibration: %d\n", ret);
		goto cfg_init_calib_error;
	}

	ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
	if (ret)
		IWL_DEBUG_INFO(priv, "Error detecting"
			" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
	return ret;

cfg_init_calib_error:
	iwl_remove_notification(priv->shrd, &calib_wait);
	return ret;
}
Example #4
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
	static const u8 calib_complete[] = {
		CALIBRATION_COMPLETE_NOTIFICATION
	};
	int ret;

	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
				   calib_complete, ARRAY_SIZE(calib_complete),
				   NULL, NULL);
	ret = iwl_init_alive_start(priv);
	if (ret) {
		IWL_ERR(priv, "Fail init calibration: %d\n", ret);
		goto cfg_init_calib_error;
	}

	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
	if (ret)
		IWL_ERR(priv, "Error detecting"
			" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
	return ret;

cfg_init_calib_error:
	iwl_remove_notification(&priv->notif_wait, &calib_wait);
	return ret;
}
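
Examples #3 and #4 are the same testmode helper taken from two generations of the driver, which is why the calls differ: the older notif-wait API took a shared-data handle and a single command ID, while the newer one takes the per-driver iwl_notif_wait_data plus an array of command IDs and its length. Side by side, both forms copied from the examples above:

/* Older form (Example #3): single command ID, shared-data handle */
iwl_init_notification_wait(priv->shrd, &calib_wait,
			   CALIBRATION_COMPLETE_NOTIFICATION,
			   NULL, NULL);

/* Newer form (Example #4): array of command IDs + ARRAY_SIZE() */
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
			   calib_complete, ARRAY_SIZE(calib_complete),
			   NULL, NULL);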
Example #5
File: scan.c  Project: 3bsa/linux
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
{
	int ret;
	struct iwl_notification_wait wait_scan_done;
	static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
	bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
					  notify);

	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
		return 0;

	if (iwl_mvm_is_radio_killed(mvm)) {
		ret = 0;
		goto out;
	}

	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
				   scan_done_notif,
				   ARRAY_SIZE(scan_done_notif),
				   NULL, NULL);

	ret = iwl_mvm_send_scan_offload_abort(mvm);
	if (ret) {
		IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
			       sched ? "offloaded " : "", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
		goto out;
	}

	IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
		       sched ? "offloaded " : "");

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
out:
	/*
	 * Clear the scan status so the next scan requests will succeed. This
	 * also ensures the Rx handler doesn't do anything, as the scan was
	 * stopped from above. Since the rx handler won't do anything now,
	 * we have to release the scan reference here.
	 */
	if (mvm->scan_status == IWL_MVM_SCAN_OS)
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);

	mvm->scan_status = IWL_MVM_SCAN_NONE;

	if (notify) {
		if (sched)
			ieee80211_sched_scan_stopped(mvm->hw);
		else
			ieee80211_scan_completed(mvm->hw, true);
	}

	return ret;
}
Example #6
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}
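
Two helpers in this example, iwl_trans_get_paging_item() above and iwl_mvm_get_shared_mem_conf() further down, set CMD_WANT_SKB so the response packet is kept; in that mode the caller owns the response and must release it with iwl_free_resp() on every path that actually received one. A minimal sketch of that pairing, with SOME_CMD and struct some_resp as placeholder names:

/* Hypothetical CMD_WANT_SKB user; SOME_CMD and struct some_resp are
 * placeholders, not real driver symbols.
 */
static int example_want_skb(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SOME_CMD,
		.flags = CMD_WANT_SKB,
	};
	struct some_resp *resp;
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;	/* nothing to free: no response was kept */

	resp = (void *)cmd.resp_pkt->data;
	/* ... validate and consume resp here ... */

	iwl_free_resp(&cmd);	/* release the response kept by CMD_WANT_SKB */
	return 0;
}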

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}
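
The two handlers above also show the callback contract: returning true completes the wait (iwl_alive_fn always does, once it has parsed the alive response), while returning false keeps the waiter armed so further matching packets are still delivered to it (iwl_wait_phy_db_entry stores every CALIB_RES_NOTIF_PHY_DB section and only finishes on INIT_COMPLETE_NOTIF). A small sketch of a handler that waits for several packets; the counter type and the threshold are invented for illustration:

/* Hypothetical handler: completes only after three matching packets.
 * struct pkt_counter and the threshold of 3 are illustrative only.
 */
struct pkt_counter {
	int seen;
};

static bool example_notif_handler(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct pkt_counter *counter = data;

	counter->seen++;	/* pkt->data would be inspected here */

	/* false keeps the waiter armed; true wakes the waiting thread */
	return counter->seen >= 3;
}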

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When dma is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location of the place where the pages are
		 * stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
			MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}
Example #7
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u8 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
			MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
Example #8
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u8 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
		if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
			mvm->queue_to_mac80211[i] = i;
		else
			mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
	}

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}
Example #9
File: fw.c  Project: alcorjjong/ath
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u8 alive_cmd[] = { MVM_ALIVE };

	fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
		if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
			mvm->queue_to_mac80211[i] = i;
		else
			mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
		atomic_set(&mvm->queue_stop_count[i], 0);
	}

	mvm->transport_queue_stop = 0;

	mvm->ucode_loaded = true;

	return 0;
}
Example #10
static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD), 0,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
				      mvm->nvm_sections);
		iwl_mvm_load_nvm_to_nic(mvm);
	}

	if (IWL_MVM_PARSE_NVM && read_nvm) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE), 0,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!IWL_MVM_PARSE_NVM && read_nvm) {
		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
		if (IS_ERR(mvm->nvm_data)) {
			ret = PTR_ERR(mvm->nvm_data);
			mvm->nvm_data = NULL;
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}
Example #11
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { MVM_ALIVE };

	set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);

#ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
	/* let's force the timeout if required */
	if (unlikely(mvm->trans->dbg_cfg.fw_alive_timeout)) {
		IWL_INFO(mvm, "Forcing fw alive notification timeout\n");
		ret = -ETIMEDOUT;
	}
#endif
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
	clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);

	return 0;
}