Code Example #1
File: nvm.c Project: asmalldev/linux
/*
 * prepare the NVM host command w/ the pointers to the nvm buffer
 * and send it to fw
 */
static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
			       u16 offset, u16 length, const u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_WRITE_OPCODE,
	};
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.len = { sizeof(struct iwl_nvm_access_cmd), length },
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, data },
		/* data may come from vmalloc, so use _DUP */
		.dataflags = { 0, IWL_HCMD_DFL_DUP },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_nvm_access_resp *nvm_resp;
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	if (!pkt) {
		IWL_ERR(mvm, "Error in NVM_ACCESS response\n");
		return -EINVAL;
	}
	/* Extract & check NVM write response */
	nvm_resp = (void *)pkt->data;
	if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
		IWL_ERR(mvm,
			"NVM access write command failed for section %u (status = 0x%x)\n",
			section, le16_to_cpu(nvm_resp->status));
		ret = -EIO;
	}

	iwl_free_resp(&cmd);
	return ret;
}
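
This first example shows the driver's canonical synchronous command flow: build the payload, send it with CMD_WANT_SKB so the firmware's response packet comes back in cmd.resp_pkt, validate the response, and always release it with iwl_free_resp(). A minimal sketch of that skeleton follows; struct my_cmd, struct my_resp, MY_CMD_ID and the status check are hypothetical placeholders, not part of the driver.

/* Sketch of the CMD_WANT_SKB pattern used above (hypothetical types). */
static int my_send_and_check(struct iwl_mvm *mvm)
{
	struct my_cmd payload = { .value = cpu_to_le32(1) };
	struct iwl_host_cmd cmd = {
		.id = MY_CMD_ID,
		.flags = CMD_WANT_SKB,		/* request the response packet */
		.data = { &payload, },
		.len = { sizeof(payload), },
	};
	struct my_resp *resp;
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);	/* synchronous send */
	if (ret)
		return ret;			/* nothing to free on send failure */

	resp = (void *)cmd.resp_pkt->data;	/* payload follows the rx header */
	if (le16_to_cpu(resp->status) != 0)	/* hypothetical __le16 status */
		ret = -EIO;

	iwl_free_resp(&cmd);			/* always release the response */
	return ret;
}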
Code Example #2
/*
 * prepare the NVM host command w/ the pointers to the nvm buffer
 * and send it to fw
 */
static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
			       u16 offset, u16 length, const u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_WRITE_OPCODE,
	};
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.len = { sizeof(struct iwl_nvm_access_cmd), length },
		.flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, data },
		/* data may come from vmalloc, so use _DUP */
		.dataflags = { 0, IWL_HCMD_DFL_DUP },
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}
Code Example #3
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       bool disable_offloading,
			       u32 cmd_flags)
{
	union {
		struct iwl_proto_offload_cmd_v1 v1;
		struct iwl_proto_offload_cmd_v2 v2;
		struct iwl_proto_offload_cmd_v3_small v3s;
		struct iwl_proto_offload_cmd_v3_large v3l;
	} cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = PROT_OFFLOAD_CONFIG_CMD,
		.flags = cmd_flags,
		.data[0] = &cmd,
		.dataflags[0] = IWL_HCMD_DFL_DUP,
	};
	struct iwl_proto_offload_cmd_common *common;
	u32 enabled = 0, size;
	u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int i;

	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		struct iwl_ns_config *nsc;
		struct iwl_targ_addr *addrs;
		int n_nsc, n_addrs;
		int c;

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
			nsc = cmd.v3s.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
			addrs = cmd.v3s.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
		} else {
			nsc = cmd.v3l.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
			addrs = cmd.v3l.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
		}

		if (mvmvif->num_target_ipv6_addrs)
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;

		/*
		 * For each address we have (and that will fit), fill a
		 * target address struct, sharing one NS offload config
		 * between addresses that map to the same solicited-node
		 * address.
		 */
		for (i = 0, c = 0;
		     i < mvmvif->num_target_ipv6_addrs &&
		     i < n_addrs && c < n_nsc; i++) {
			struct in6_addr solicited_addr;
			int j;

			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
						  &solicited_addr);
			for (j = 0; j < c; j++)
				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
						  &solicited_addr) == 0)
					break;
			if (j == c)
				c++;
			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
			addrs[i].config_num = cpu_to_le32(j);
			nsc[j].dest_ipv6_addr = solicited_addr;
			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
		}

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
			cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
		else
			cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
			memcpy(cmd.v2.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v2.target_ipv6_addr[i]));
	} else {
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
			memcpy(cmd.v1.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v1.target_ipv6_addr[i]));
	}
#endif

	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
		common = &cmd.v3s.common;
		size = sizeof(cmd.v3s);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		common = &cmd.v3l.common;
		size = sizeof(cmd.v3l);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		common = &cmd.v2.common;
		size = sizeof(cmd.v2);
	} else {
		common = &cmd.v1.common;
		size = sizeof(cmd.v1);
	}

	if (vif->bss_conf.arp_addr_cnt) {
		enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
		common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
	}

	if (!disable_offloading)
		common->enabled = cpu_to_le32(enabled);

	hcmd.len[0] = size;
	return iwl_mvm_send_cmd(mvm, &hcmd);
}
Code Example #4
File: fw.c Project: BORETS24/common.git-android-4.4
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
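
The iwl_mvm_send_cmd_pdu() call above is the convenience wrapper for single-buffer commands that need no response. As an illustrative sketch (not the wrapper's actual implementation), it behaves roughly like building the iwl_host_cmd by hand:

	/* Roughly equivalent long form of the send_cmd_pdu() call above. */
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
		.data = { &fw_paging_cmd, },
		.len = { sizeof(fw_paging_cmd), },
	};

	return iwl_mvm_send_cmd(mvm, &cmd);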

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism. The driver
	 * configures the paging flow only once; the CPU2 paging image
	 * is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When DMA is not enabled, the driver needs to copy/write
		 * the downloaded/uploaded pages to/from SMEM. This gets
		 * the location where the pages are stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * Abort after reading the NVM in case RF Kill is on; we will
	 * complete the init sequence later, when RF Kill switches off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
			MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}
Code Example #5
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
			      u16 offset, u16 length, u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_READ_OPCODE,
	};
	struct iwl_nvm_access_resp *nvm_resp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	u8 *resp_data;

	cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		ret = -EIO;
		goto exit;
	}

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16_to_cpu(nvm_resp->status);
	bytes_read = le16_to_cpu(nvm_resp->length);
	offset_read = le16_to_cpu(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		IWL_ERR(mvm,
			"NVM access command failed with status %d (device: %s)\n",
			ret, mvm->cfg->name);
		ret = -EINVAL;
		goto exit;
	}

	if (offset_read != offset) {
		IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
			offset_read);
		ret = -EINVAL;
		goto exit;
	}

	/* Copy the NVM data out of the response */
	memcpy(data + offset, resp_data, bytes_read);
	ret = bytes_read;

exit:
	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
				 const u8 *data, u16 length)
{
	int offset = 0;

	/* copy data in chunks of 2k (and remainder if any) */

	while (offset < length) {
		int chunk_size, ret;

		chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
				 length - offset);

		ret = iwl_nvm_write_chunk(mvm, section, offset,
					  chunk_size, data + offset);
		if (ret < 0)
			return ret;

		offset += chunk_size;
	}

	return 0;
}
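
iwl_nvm_write_section() splits an arbitrarily long buffer into IWL_NVM_DEFAULT_CHUNK_SIZE pieces so that each NVM_ACCESS_CMD stays within the firmware's per-command limit. A hypothetical caller (section_id, buf and buf_len are assumed names, not taken from the driver):

	/* Hypothetical usage: push one whole NVM section to the firmware. */
	ret = iwl_nvm_write_section(mvm, section_id, buf, buf_len);
	if (ret < 0)
		IWL_ERR(mvm, "writing NVM section %u failed: %d\n",
			section_id, ret);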
Code Example #6
File: scan.c Project: 3bsa/linux
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
				       struct cfg80211_sched_scan_request *req)
{
	struct iwl_scan_offload_profile *profile;
	struct iwl_scan_offload_profile_cfg *profile_cfg;
	struct iwl_scan_offload_blacklist *blacklist;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
		.len[1] = sizeof(*profile_cfg),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
	};
	int blacklist_len;
	int i;
	int ret;

	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
		return -EIO;

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
	else
		blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;

	blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
	if (!blacklist)
		return -ENOMEM;

	profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
	if (!profile_cfg) {
		ret = -ENOMEM;
		goto free_blacklist;
	}

	cmd.data[0] = blacklist;
	cmd.len[0] = sizeof(*blacklist) * blacklist_len;
	cmd.data[1] = profile_cfg;

	/* No blacklist configuration */

	profile_cfg->num_profiles = req->n_match_sets;
	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
		profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;

	for (i = 0; i < req->n_match_sets; i++) {
		profile = &profile_cfg->profiles[i];
		profile->ssid_index = i;
		/* Support any cipher and auth algorithm */
		profile->unicast_cipher = 0xff;
		profile->auth_alg = 0xff;
		profile->network_type = IWL_NETWORK_TYPE_ANY;
		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
	}

	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(profile_cfg);
free_blacklist:
	kfree(blacklist);

	return ret;
}

static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
				  struct cfg80211_sched_scan_request *req)
{
	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
		IWL_DEBUG_SCAN(mvm,
			       "Sending scheduled scan with filtering, n_match_sets %d\n",
			       req->n_match_sets);
		return false;
	}

	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
	return true;
}

int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       struct cfg80211_sched_scan_request *req,
			       struct ieee80211_scan_ies *ies)
{
	int ret;

	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
		ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
		if (ret)
			return ret;
		ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
	} else {
		mvm->scan_status = IWL_MVM_SCAN_SCHED;
		ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
		if (ret)
			return ret;
		ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
	}

	return ret;
}
Code Example #7
File: fw.c Project: OSEC-pl/backports-iwlwifi
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	struct iwl_geo_tx_power_profiles_resp *resp;
	int ret;

	struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
	};
	struct iwl_host_cmd cmd = {
		.id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
		.len = { sizeof(geo_cmd), },
		.flags = CMD_WANT_SKB,
		.data = { &geo_cmd },
	};

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
		return ret;
	}

	resp = (void *)cmd.resp_pkt->data;
	ret = le32_to_cpu(resp->profile_idx);
	if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
		/* log the invalid index before overwriting it with -EIO */
		IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
		ret = -EIO;
	}

	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	struct iwl_geo_tx_power_profiles_cmd cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
	};
	int ret, i, j;
	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);

	ret = iwl_mvm_sar_get_wgds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"Geo SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");

	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
		     ACPI_WGDS_TABLE_SIZE !=  ACPI_WGDS_WIFI_DATA_SIZE);

	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);

	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
		struct iwl_per_chain_offset *chain =
			(struct iwl_per_chain_offset *)&cmd.table[i];

		for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
			u8 *value;

			value = &mvm->geo_profiles[i].values[j *
				ACPI_GEO_PER_CHAIN_SIZE];
			chain[j].max_tx_power = cpu_to_le16(value[0]);
			chain[j].chain_a = value[1];
			chain[j].chain_b = value[2];
			IWL_DEBUG_RADIO(mvm,
					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
					i, j, value[1], value[2], value[0]);
		}
	}
	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}

#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
			       int prof_b)
{
	return -ENOENT;
}

int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	return -ENOENT;
}
#endif /* CONFIG_ACPI */

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_mvm_sar_get_wrds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* if not available, don't fail and don't bother with EWRD */
		return 0;
	}

	ret = iwl_mvm_sar_get_ewrd_table(mvm);
	/* if EWRD is not available, we can still use WRDS, so don't fail */
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm,
				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
				ret);

#if defined(CPTCFG_IWLMVM_VENDOR_CMDS) && defined(CONFIG_ACPI)
	/*
	 * if no profile was chosen by the user yet, choose profile 1 (WRDS) as
	 * default for both chains
	 */
	if (mvm->sar_chain_a_profile && mvm->sar_chain_b_profile)
		ret = iwl_mvm_sar_select_profile(mvm, mvm->sar_chain_a_profile,
						 mvm->sar_chain_b_profile);
	else
#endif
	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);

	/* if we don't have profile 0 from BIOS, just skip it */
	if (ret == -ENOENT)
		return 0;

	return ret;
}
Code Example #8
int iwl_mvm_testmode_send_cmd(struct iwl_op_mode *op_mode,
			      struct iwl_host_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	return iwl_mvm_send_cmd(mvm, cmd);
}
Code Example #9
static int iwl_mvm_tm_send_hcmd(struct iwl_mvm *mvm,
				struct iwl_tm_data *data_in,
				struct iwl_tm_data *data_out)
{
	struct iwl_tm_cmd_request *hcmd_req = data_in->data;
	struct iwl_tm_cmd_request *cmd_resp;
	u32 reply_len, resp_size;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd host_cmd = {
		.id = hcmd_req->id,
		.data[0] = hcmd_req->data,
		.len[0] = hcmd_req->len,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.flags = CMD_SYNC,
	};
	int ret;

	if (hcmd_req->want_resp)
		host_cmd.flags |= CMD_WANT_SKB;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, &host_cmd);
	mutex_unlock(&mvm->mutex);
	if (ret)
		return ret;
	/* if no reply is required, we are done */
	if (!(host_cmd.flags & CMD_WANT_SKB))
		return 0;

	/* Retrieve response packet */
	pkt = host_cmd.resp_pkt;
	if (!pkt) {
		IWL_ERR(mvm->trans, "HCMD received a null response packet\n");
		return -ENOMSG;
	}
	reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* Set response data */
	resp_size = sizeof(struct iwl_tm_cmd_request) + reply_len;
	cmd_resp = kzalloc(resp_size, GFP_KERNEL);
	if (!cmd_resp) {
		iwl_free_resp(&host_cmd);
		return -ENOMEM;
	}
	cmd_resp->id = hcmd_req->id;
	cmd_resp->len = reply_len;
	memcpy(cmd_resp->data, &(pkt->hdr), reply_len);

	iwl_free_resp(&host_cmd);

	data_out->data = cmd_resp;
	data_out->len = resp_size;

	return 0;
}

static void iwl_mvm_tm_execute_reg_ops(struct iwl_trans *trans,
				       struct iwl_tm_regs_request *request,
				       struct iwl_tm_regs_request *result)
{
	struct iwl_tm_reg_op *cur_op;
	u32 idx, read_idx;
	for (idx = 0, read_idx = 0; idx < request->num; idx++) {
		cur_op = &request->reg_ops[idx];

		if  (cur_op->op_type == IWL_TM_REG_OP_READ) {
			cur_op->value = iwl_read32(trans, cur_op->address);
			memcpy(&result->reg_ops[read_idx], cur_op,
			       sizeof(*cur_op));
			read_idx++;
		} else {
			/* IWL_TM_REG_OP_WRITE is the only possible option */
			iwl_write32(trans, cur_op->address, cur_op->value);
		}
	}
}

static int iwl_mvm_tm_reg_ops(struct iwl_trans *trans,
			      struct iwl_tm_data *data_in,
			      struct iwl_tm_data *data_out)
{
	struct iwl_tm_reg_op *cur_op;
	struct iwl_tm_regs_request *request = data_in->data;
	struct iwl_tm_regs_request *result;
	u32 result_size;
	u32 idx, read_idx;
	bool is_grab_nic_access_required = true;
	unsigned long flags;

	/* Calculate result size (result is returned only for read ops) */
	for (idx = 0, read_idx = 0; idx < request->num; idx++) {
		if (request->reg_ops[idx].op_type == IWL_TM_REG_OP_READ)
			read_idx++;
		/*
		 * Check if there is an operation that is not in the
		 * CSR range (0x00000000 - 0x000003FF) and not in the
		 * AL range.
		 */
		cur_op = &request->reg_ops[idx];
		if (IS_AL_ADDR(cur_op->address) ||
		    (cur_op->address < HBUS_BASE))
			is_grab_nic_access_required = false;

	}
	result_size = sizeof(struct iwl_tm_regs_request) +
		      read_idx*sizeof(struct iwl_tm_reg_op);

	result = kzalloc(result_size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->num = read_idx;
	if (is_grab_nic_access_required) {
		if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
			kfree(result);
			return -EBUSY;
		}
		iwl_mvm_tm_execute_reg_ops(trans, request, result);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		iwl_mvm_tm_execute_reg_ops(trans, request, result);
	}

	data_out->data = result;
	data_out->len = result_size;

	return 0;
}

static int iwl_tm_get_dev_info(struct iwl_mvm *mvm,
			       struct iwl_tm_data *data_out)
{
	struct iwl_tm_dev_info *dev_info;
	const u8 driver_ver[] = IWLWIFI_VERSION;

	if (!mvm->nvm_data)
		return -EINVAL;

	dev_info = kzalloc(sizeof(struct iwl_tm_dev_info) +
			   (strlen(driver_ver)+1)*sizeof(u8), GFP_KERNEL);
	if (!dev_info)
		return -ENOMEM;

	dev_info->dev_id = mvm->trans->hw_id;
	dev_info->fw_ver = mvm->fw->ucode_ver;
	dev_info->vendor_id = PCI_VENDOR_ID_INTEL;

	dev_info->silicon_step = mvm->nvm_data->radio_cfg_step;

	/* TODO: Assign real value when feature is implemented */
	dev_info->build_ver = 0x00;

	strcpy(dev_info->driver_ver, driver_ver);

	data_out->data = dev_info;
	data_out->len = sizeof(*dev_info);

	return 0;
}

static int iwl_tm_indirect_read(struct iwl_mvm *mvm,
				struct iwl_tm_data *data_in,
				struct iwl_tm_data *data_out)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_tm_sram_read_request *cmd_in = data_in->data;
	u32 addr = cmd_in->offset;
	u32 size = cmd_in->length;
	u32 *buf32, size32, i;
	unsigned long flags;

	if (size & (sizeof(u32)-1))
		return -EINVAL;

	data_out->data = kmalloc(size, GFP_KERNEL);
	if (!data_out->data)
		return -ENOMEM;

	data_out->len = size;

	size32 = size / sizeof(u32);
	buf32 = data_out->data;

	mutex_lock(&mvm->mutex);

	/* Hard-coded periphery absolute address */
	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
			mutex_unlock(&mvm->mutex);
			return -EBUSY;
		}
		for (i = 0; i < size32; i++)
			buf32[i] = iwl_trans_read_prph(trans,
						       addr + i * sizeof(u32));
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		/* target memory (SRAM) */
		iwl_trans_read_mem(trans, addr, buf32, size32);
	}

	mutex_unlock(&mvm->mutex);
	return 0;
}

static int iwl_tm_indirect_write(struct iwl_mvm *mvm,
				 struct iwl_tm_data *data_in)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_tm_sram_write_request *cmd_in = data_in->data;
	u32 addr = cmd_in->offset;
	u32 size = cmd_in->len;
	u8 *buf = cmd_in->buffer;
	u32 *buf32 = (u32 *)buf, size32 = size / sizeof(u32);
	unsigned long flags;
	u32 val, i;

	mutex_lock(&mvm->mutex);
	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			memcpy(&val, buf, size);
			if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
				mutex_unlock(&mvm->mutex);
				return -EBUSY;
			}
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x000FFFFF) | ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_trans_release_nic_access(trans, &flags);
		} else {
			if (size % sizeof(u32)) {
				mutex_unlock(&mvm->mutex);
				return -EINVAL;
			}

			for (i = 0; i < size32; i++)
				iwl_write_prph(trans, addr + i*sizeof(u32),
					       buf32[i]);
		}
	} else {
		iwl_trans_write_mem(trans, addr, buf32, size32);
	}
	mutex_unlock(&mvm->mutex);

	return 0;
}

/**
 * iwl_mvm_tm_cmd_execute - Implementation of test command executor callback
 * @op_mode:	  Specific device's operation mode
 * @cmd:	  User space command's index
 * @data_in:	  Input data. The "data" field is to be cast to the
 *		  relevant data structure. All verification must be done
 *		  by the caller, so the input data length is assumed to
 *		  be valid.
 * @data_out:	  Will be allocated inside, freeing is in the caller's
 *		  responsibility
 */
int iwl_mvm_tm_cmd_execute(struct iwl_op_mode *op_mode, u32 cmd,
			   struct iwl_tm_data *data_in,
			   struct iwl_tm_data *data_out)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (WARN_ON_ONCE(!op_mode || !data_in))
		return -EINVAL;

	switch (cmd) {
	case IWL_TM_USER_CMD_HCMD:
		return iwl_mvm_tm_send_hcmd(mvm, data_in, data_out);
	case IWL_TM_USER_CMD_REG_ACCESS:
		return iwl_mvm_tm_reg_ops(mvm->trans, data_in, data_out);
	case IWL_TM_USER_CMD_SRAM_WRITE:
		return iwl_tm_indirect_write(mvm, data_in);
	case IWL_TM_USER_CMD_SRAM_READ:
		return iwl_tm_indirect_read(mvm, data_in, data_out);
	case IWL_TM_USER_CMD_GET_DEVICE_INFO:
		return iwl_tm_get_dev_info(mvm, data_out);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
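
Per the kernel-doc above, data_out is allocated inside the executor and freeing it is the caller's responsibility. A hedged sketch of a hypothetical dispatch (dummy stands in for whatever request struct the command expects):

	/* Hypothetical caller: query device info, then free the result. */
	struct iwl_tm_data in = { .data = &dummy, .len = sizeof(dummy) };
	struct iwl_tm_data out = {};
	int err;

	err = iwl_mvm_tm_cmd_execute(op_mode, IWL_TM_USER_CMD_GET_DEVICE_INFO,
				     &in, &out);
	if (!err) {
		/* ... use out.data (a struct iwl_tm_dev_info) ... */
		kfree(out.data);	/* freeing is the caller's job */
	}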

/**
 * iwl_tm_mvm_send_rx() - Send a spontaneous rx message to user
 * @mvm:	mvm opmode pointer
 * @rxb:	Contains rx packet to be sent
 */
void iwl_tm_mvm_send_rx(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt;
	int length;

	pkt = rxb_addr(rxb);
	length = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* the length doesn't include len_n_flags field, so add it manually */
	length += sizeof(__le32);

	iwl_tm_gnl_send_msg(mvm->trans, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT,
			    true, (void *)pkt, length, GFP_ATOMIC);
}
Code Example #10
File: nvm.c Project: asmalldev/linux
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
		   enum iwl_mcc_source src_id)
{
	struct iwl_mcc_update_cmd mcc_update_cmd = {
		.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
		.source_id = (u8)src_id,
	};
	struct iwl_mcc_update_resp *resp_cp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = MCC_UPDATE_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &mcc_update_cmd },
	};

	int ret;
	u32 status;
	int resp_len, n_channels;
	u16 mcc;
	bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return ERR_PTR(-EOPNOTSUPP);

	cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
	if (!resp_v2)
		cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);

	IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
		      alpha2[0], alpha2[1], src_id);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ERR_PTR(ret);

	pkt = cmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;

		n_channels =  __le32_to_cpu(mcc_resp->n_channels);
		resp_len = sizeof(struct iwl_mcc_update_resp) +
			   n_channels * sizeof(__le32);
		resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
	} else {
		struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;

		n_channels =  __le32_to_cpu(mcc_resp_v1->n_channels);
		resp_len = sizeof(struct iwl_mcc_update_resp) +
			   n_channels * sizeof(__le32);
		resp_cp = kzalloc(resp_len, GFP_KERNEL);

		if (resp_cp) {
			resp_cp->status = mcc_resp_v1->status;
			resp_cp->mcc = mcc_resp_v1->mcc;
			resp_cp->cap = mcc_resp_v1->cap;
			resp_cp->source_id = mcc_resp_v1->source_id;
			resp_cp->n_channels = mcc_resp_v1->n_channels;
			memcpy(resp_cp->channels, mcc_resp_v1->channels,
			       n_channels * sizeof(__le32));
		}
	}

	if (!resp_cp) {
		ret = -ENOMEM;
		goto exit;
	}

	status = le32_to_cpu(resp_cp->status);

	mcc = le16_to_cpu(resp_cp->mcc);

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0) {
		mcc = 0x3030;  /* "00" - world */
		resp_cp->mcc = cpu_to_le16(mcc);
	}

	IWL_DEBUG_LAR(mvm,
		      "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
		      status, mcc, mcc >> 8, mcc & 0xff,
		      !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);

exit:
	iwl_free_resp(&cmd);
	if (ret)
		return ERR_PTR(ret);
	return resp_cp;
}
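
Because iwl_mvm_update_mcc() returns either a freshly allocated copy of the response or an ERR_PTR-encoded error, callers must check with IS_ERR() and free the copy themselves. A hedged sketch of a hypothetical caller:

	/* Hypothetical caller: request an MCC update and release the copy. */
	struct iwl_mcc_update_resp *resp;

	resp = iwl_mvm_update_mcc(mvm, "US", MCC_SOURCE_BIOS);
	if (IS_ERR(resp))
		return PTR_ERR(resp);

	/* ... use resp->mcc, resp->n_channels, resp->channels ... */
	kfree(resp);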

int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
	bool tlv_lar;
	bool nvm_lar;
	int retval;
	struct ieee80211_regdomain *regd;
	char mcc[3];

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
		nvm_lar = mvm->nvm_data->lar_enabled;
		if (tlv_lar != nvm_lar)
			IWL_INFO(mvm,
				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
				 tlv_lar ? "enabled" : "disabled",
				 nvm_lar ? "enabled" : "disabled");
	}

	if (!iwl_mvm_is_lar_supported(mvm))
		return 0;

	/*
	 * try to replay the last set MCC to FW. If it doesn't exist,
	 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
	 */
	retval = iwl_mvm_init_fw_regd(mvm);
	if (retval != -ENOENT)
		return retval;

	/*
	 * Driver regulatory hint for initial update; this also informs the
	 * firmware we support wifi location updates.
	 * Disallow scans that might crash the FW while the LAR regdomain
	 * is not set.
	 */
	mvm->lar_regdom_set = false;

	regd = iwl_mvm_get_current_regdomain(mvm, NULL);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
	    !iwl_get_bios_mcc(mvm->dev, mcc)) {
		kfree(regd);
		regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
					     MCC_SOURCE_BIOS, NULL);
		if (IS_ERR_OR_NULL(regd))
			return -EIO;
	}

	retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
	kfree(regd);
	return retval;
}
Code Example #11
File: nvm.c Project: asmalldev/linux
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
			      u16 offset, u16 length, u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_READ_OPCODE,
	};
	struct iwl_nvm_access_resp *nvm_resp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	u8 *resp_data;

	cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16_to_cpu(nvm_resp->status);
	bytes_read = le16_to_cpu(nvm_resp->length);
	offset_read = le16_to_cpu(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk at an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver has already
			 * read valid data from another chunk, so this case
			 * is not an error.
			 */
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
					 offset);
			ret = 0;
		} else {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed with status %d (device: %s)\n",
					 ret, mvm->cfg->name);
			ret = -EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
			offset_read);
		ret = -EINVAL;
		goto exit;
	}

	/* Copy the NVM data out of the response */
	memcpy(data + offset, resp_data, bytes_read);
	ret = bytes_read;

exit:
	iwl_free_resp(&cmd);
	return ret;
}
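
iwl_nvm_read_chunk() returns the number of bytes actually read, so a section reader can loop until the firmware returns a short or empty chunk. A hedged sketch of such a loop, modeled on the driver's section-read logic (my_read_section and the termination details are assumptions):

	static int my_read_section(struct iwl_mvm *mvm, u16 section,
				   u8 *data, u16 max_len)
	{
		int offset = 0;

		while (offset < max_len) {
			int chunk_size, ret;

			chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
					 max_len - offset);
			/* iwl_nvm_read_chunk() copies into data + offset */
			ret = iwl_nvm_read_chunk(mvm, section, offset,
						 chunk_size, data);
			if (ret < 0)
				return ret;
			if (ret == 0)		/* section exhausted */
				break;
			offset += ret;
			if (ret < chunk_size)	/* short read: end of section */
				break;
		}

		return offset;
	}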

static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
				 const u8 *data, u16 length)
{
	int offset = 0;

	/* copy data in chunks of 2k (and remainder if any) */

	while (offset < length) {
		int chunk_size, ret;

		chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
				 length - offset);

		ret = iwl_nvm_write_chunk(mvm, section, offset,
					  chunk_size, data + offset);
		if (ret < 0)
			return ret;

		offset += chunk_size;
	}

	return 0;
}
Code Example #12
File: led.c Project: Anjali05/linux
static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on)
{
	struct iwl_led_cmd led_cmd = {
		.status = cpu_to_le32(on),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(LONG_GROUP, LEDS_CMD),
		.len = { sizeof(led_cmd), },
		.data = { &led_cmd, },
		.flags = CMD_ASYNC,
	};
	int err;

	if (!iwl_mvm_firmware_running(mvm))
		return;

	err = iwl_mvm_send_cmd(mvm, &cmd);

	if (err)
		IWL_WARN(mvm, "LED command failed: %d\n", err);
}

static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on)
{
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
		iwl_mvm_send_led_fw_cmd(mvm, on);
		return;
	}

	iwl_write32(mvm->trans, CSR_LED_REG,
		    on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
}

static void iwl_led_brightness_set(struct led_classdev *led_cdev,
				   enum led_brightness brightness)
{
	struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);

	iwl_mvm_led_set(mvm, brightness > 0);
}

int iwl_mvm_leds_init(struct iwl_mvm *mvm)
{
	int mode = iwlwifi_mod_params.led_mode;
	int ret;

	switch (mode) {
	case IWL_LED_BLINK:
		IWL_ERR(mvm, "Blink led mode not supported, used default\n");
		/* fall through */
	case IWL_LED_DEFAULT:
	case IWL_LED_RF_STATE:
		mode = IWL_LED_RF_STATE;
		break;
	case IWL_LED_DISABLE:
		IWL_INFO(mvm, "Led disabled\n");
		return 0;
	default:
		return -EINVAL;
	}

	mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
				   wiphy_name(mvm->hw->wiphy));
	mvm->led.brightness_set = iwl_led_brightness_set;
	mvm->led.max_brightness = 1;

	if (mode == IWL_LED_RF_STATE)
		mvm->led.default_trigger =
			ieee80211_get_radio_led_name(mvm->hw);

	ret = led_classdev_register(mvm->trans->dev, &mvm->led);
	if (ret) {
		kfree(mvm->led.name);
		IWL_INFO(mvm, "Failed to enable led\n");
		return ret;
	}

	mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
	return 0;
}
Code Example #13
File: ftm-responder.c Project: Anjali05/linux
static int
iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
			  struct ieee80211_vif *vif,
			  struct cfg80211_chan_def *chandef)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_tof_responder_config_cmd cmd = {
		.channel_num = chandef->chan->hw_value,
		.cmd_valid_fields =
			cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
				    IWL_TOF_RESPONDER_CMD_VALID_BSSID |
				    IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
		.sta_id = mvmvif->bcast_sta.sta_id,
	};

	lockdep_assert_held(&mvm->mutex);

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		cmd.bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		cmd.bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		cmd.bandwidth = IWL_TOF_BW_40;
		cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
		break;
	case NL80211_CHAN_WIDTH_80:
		cmd.bandwidth = IWL_TOF_BW_80;
		cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	memcpy(cmd.bssid, vif->addr, ETH_ALEN);

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
						    LOCATION_GROUP, 0),
				    0, sizeof(cmd), &cmd);
}

static int
iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
				  struct ieee80211_vif *vif,
				  struct ieee80211_ftm_responder_params *params)
{
	struct iwl_tof_responder_dyn_config_cmd cmd = {
		.lci_len = cpu_to_le32(params->lci_len + 2),
		.civic_len = cpu_to_le32(params->civicloc_len + 2),
	};
	u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
				 LOCATION_GROUP, 0),
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
		.data[1] = &data,
		/* .len[1] set later */
		/* may not be able to DMA from stack */
		.dataflags[1] = IWL_HCMD_DFL_DUP,
	};
	u32 aligned_lci_len = ALIGN(params->lci_len + 2, 4);
	u32 aligned_civicloc_len = ALIGN(params->civicloc_len + 2, 4);
	u8 *pos = data;

	lockdep_assert_held(&mvm->mutex);

	if (aligned_lci_len + aligned_civicloc_len > sizeof(data)) {
		IWL_ERR(mvm, "LCI/civicloc data too big (%zd + %zd)\n",
			params->lci_len, params->civicloc_len);
		return -ENOBUFS;
	}

	pos[0] = WLAN_EID_MEASURE_REPORT;
	pos[1] = params->lci_len;
	memcpy(pos + 2, params->lci, params->lci_len);

	pos += aligned_lci_len;
	pos[0] = WLAN_EID_MEASURE_REPORT;
	pos[1] = params->civicloc_len;
	memcpy(pos + 2, params->civicloc, params->civicloc_len);

	hcmd.len[1] = aligned_lci_len + aligned_civicloc_len;

	return iwl_mvm_send_cmd(mvm, &hcmd);
}

int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_ftm_responder_params *params;
	struct ieee80211_chanctx_conf ctx, *pctx;
	u16 *phy_ctxt_id;
	struct iwl_mvm_phy_ctxt *phy_ctxt;
	int ret;

	params = vif->bss_conf.ftmr_params;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(!vif->bss_conf.ftm_responder))
		return -EINVAL;

	if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
	    !mvmvif->ap_ibss_active) {
		IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
		return -EIO;
	}

	rcu_read_lock();
	pctx = rcu_dereference(vif->chanctx_conf);
	/* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care
	 * about changes in the ctx after releasing the lock because the driver
	 * is still protected by the mutex. */
	ctx = *pctx;
	phy_ctxt_id  = (u16 *)pctx->drv_priv;
	rcu_read_unlock();

	phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def,
				       ctx.rx_chains_static,
				       ctx.rx_chains_dynamic);
	if (ret)
		return ret;

	ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def);
	if (ret)
		return ret;

	if (params)
		ret = iwl_mvm_ftm_responder_dyn_cfg_cmd(mvm, vif, params);

	return ret;
}

void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif)
{
	if (!vif->bss_conf.ftm_responder)
		return;

	iwl_mvm_ftm_start_responder(mvm, vif);
}

void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_ftm_responder_stats *resp = (void *)pkt->data;
	struct cfg80211_ftm_responder_stats *stats = &mvm->ftm_resp_stats;
	u32 flags = le32_to_cpu(resp->flags);

	if (resp->success_ftm == resp->ftm_per_burst)
		stats->success_num++;
	else if (resp->success_ftm >= 2)
		stats->partial_num++;
	else
		stats->failed_num++;

	if ((flags & FTM_RESP_STAT_ASAP_REQ) &&
	    (flags & FTM_RESP_STAT_ASAP_RESP))
		stats->asap_num++;

	if (flags & FTM_RESP_STAT_NON_ASAP_RESP)
		stats->non_asap_num++;

	stats->total_duration_ms += le32_to_cpu(resp->duration) / USEC_PER_MSEC;

	if (flags & FTM_RESP_STAT_TRIGGER_UNKNOWN)
		stats->unknown_triggers_num++;

	if (flags & FTM_RESP_STAT_DUP)
		stats->reschedule_requests_num++;

	if (flags & FTM_RESP_STAT_NON_ASAP_OUT_WIN)
		stats->out_of_window_triggers_num++;
}