Example #1
void ixp425_insw(u32 p, u16 *addr, u32 count)
{
	while (count--)
		*addr++ = cpu_to_le16(inw(p));
}
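A note on what the helper does before the remaining examples (a rough sketch, not the kernel's actual byteorder implementation): cpu_to_le16() converts a 16-bit value from CPU byte order to little-endian. On a little-endian machine it compiles to nothing; on a big-endian machine it swaps the two bytes. In ixp425_insw() above this keeps the device's little-endian word layout intact in the destination buffer regardless of CPU endianness. A self-contained userspace equivalent, assuming glibc's endian.h:

#include <endian.h>	/* glibc: __BYTE_ORDER, __BIG_ENDIAN */

static inline unsigned short sketch_cpu_to_le16(unsigned short v)
{
#if __BYTE_ORDER == __BIG_ENDIAN
	return (unsigned short)((v << 8) | (v >> 8));	/* swap the two bytes */
#else
	return v;	/* CPU order is already little-endian */
#endif
}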
Example #2
void ath_paprd_calibrate(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath9k_hw_cal_data *caldata = ah->caldata;
	struct ath_common *common = ath9k_hw_common(ah);
	int ftype;
	int chain_ok = 0;
	int chain;
	int len = 1800;
	int ret;

	if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done)
		return;

	ath9k_ps_wakeup(sc);

	if (ar9003_paprd_init_table(ah) < 0)
		goto fail_paprd;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto fail_paprd;

	skb_put(skb, len);
	memset(skb->data, 0, len);
	hdr = (struct ieee80211_hdr *)skb->data;
	ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
	hdr->frame_control = cpu_to_le16(ftype);
	hdr->duration_id = cpu_to_le16(10);
	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);

	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
		if (!(ah->txchainmask & BIT(chain)))
			continue;

		chain_ok = 0;
		ar9003_paprd_setup_gain_table(ah, chain);

		ath_dbg(common, CALIBRATE,
			"Sending PAPRD training frame on chain %d\n", chain);
		if (!ath_paprd_send_frame(sc, skb, chain))
			goto fail_paprd;

		if (!ar9003_paprd_is_done(ah)) {
			ath_dbg(common, CALIBRATE,
				"PAPRD not yet done on chain %d\n", chain);
			break;
		}

		ret = ar9003_paprd_create_curve(ah, caldata, chain);
		if (ret == -EINPROGRESS) {
			ath_dbg(common, CALIBRATE,
				"PAPRD curve on chain %d needs to be re-trained\n",
				chain);
			break;
		} else if (ret) {
			ath_dbg(common, CALIBRATE,
				"PAPRD create curve failed on chain %d\n",
				chain);
			break;
		}

		chain_ok = 1;
	}
	kfree_skb(skb);

	if (chain_ok) {
		caldata->paprd_done = true;
		ath_paprd_activate(sc);
	}

fail_paprd:
	ath9k_ps_restore(sc);
}
Example #3
static ssize_t ieee80211_if_parse_tkip_mic_test(
	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
	struct ieee80211_local *local = sdata->local;
	u8 addr[ETH_ALEN];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;

	/*
	 * Assume colon-delimited MAC address with possible white space
	 * following.
	 */
	if (buflen < 3 * ETH_ALEN - 1)
		return -EINVAL;
	if (hwaddr_aton(buf, addr) < 0)
		return -EINVAL;

	if (!ieee80211_sdata_running(sdata))
		return -ENOTCONN;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, local->hw.extra_tx_headroom);

	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		memcpy(hdr->addr1, addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
		break;
	case NL80211_IFTYPE_STATION:
		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
		/* BSSID SA DA */
		if (sdata->vif.bss_conf.bssid == NULL) {
			dev_kfree_skb(skb);
			return -ENOTCONN;
		}
		memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, addr, ETH_ALEN);
		break;
	default:
		dev_kfree_skb(skb);
		return -EOPNOTSUPP;
	}
	hdr->frame_control = fc;

	/*
	 * Add some length to the test frame to make it look a bit more valid.
	 * The exact contents do not matter since the recipient is required
	 * to drop the frame because of the Michael MIC failure.
	 */
	memset(skb_put(skb, 50), 0, 50);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;

	ieee80211_tx_skb(sdata, skb);

	return buflen;
}
Example #4
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
{
	struct i40e_aq_desc *desc;
	struct iavf_dma_mem *bi;
	iavf_status ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					   sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
Example #5
/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	iavf_status ret_code = 0;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size.
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
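	/* The expression below is the ring distance from ntc to ntu: when
	 * ntc has wrapped past ntu, the full ring size (count) is added
	 * back to the otherwise negative ntu - ntc. */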
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
Example #6
/*
 * This function prepares command for ad-hoc join.
 *
 * Most of the parameters are set up by copying from the target BSS descriptor
 * from the scan response.
 *
 * In addition, the following TLVs are added -
 *      - Channel TLV
 *      - Vendor specific IE
 *      - WPA/WPA2 IE
 *      - 11n IE
 *
 * Preparation also includes -
 *      - Setting command ID and proper size
 *      - Ensuring correct endian-ness
 */
int
mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *cmd, void *data_buf)
{
	int rsn_ie_len = 0;
	struct host_cmd_ds_802_11_ad_hoc_join *adhoc_join =
		&cmd->params.adhoc_join;
	struct mwifiex_bssdescriptor *bss_desc =
		(struct mwifiex_bssdescriptor *) data_buf;
	struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
	u32 cmd_append_size = 0;
	u16 tmp_cap;
	u32 i, rates_size = 0;
	u16 curr_pkt_filter;
	u8 *pos =
		(u8 *) adhoc_join +
		sizeof(struct host_cmd_ds_802_11_ad_hoc_join);

/* Use G protection */
#define USE_G_PROTECTION        0x02
	if (bss_desc->erp_flags & USE_G_PROTECTION) {
		curr_pkt_filter = priv->curr_pkt_filter |
				  HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;

		if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
					     HostCmd_ACT_GEN_SET, 0,
					     &curr_pkt_filter)) {
			dev_err(priv->adapter->dev,
			       "ADHOC_J_CMD: G Protection config failed\n");
			return -1;
		}
	}

	priv->attempted_bss_desc = bss_desc;

	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_JOIN);

	adhoc_join->bss_descriptor.bss_mode = HostCmd_BSS_MODE_IBSS;

	adhoc_join->bss_descriptor.beacon_period
		= cpu_to_le16(bss_desc->beacon_period);

	memcpy(&adhoc_join->bss_descriptor.bssid,
	       &bss_desc->mac_address, ETH_ALEN);

	memcpy(&adhoc_join->bss_descriptor.ssid,
	       &bss_desc->ssid.ssid, bss_desc->ssid.ssid_len);

	memcpy(&adhoc_join->bss_descriptor.phy_param_set,
	       &bss_desc->phy_param_set,
	       sizeof(union ieee_types_phy_param_set));

	memcpy(&adhoc_join->bss_descriptor.ss_param_set,
	       &bss_desc->ss_param_set, sizeof(union ieee_types_ss_param_set));

	tmp_cap = bss_desc->cap_info_bitmap;

	tmp_cap &= CAPINFO_MASK;

	dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: tmp_cap=%4X"
			" CAPINFO_MASK=%4lX\n", tmp_cap, CAPINFO_MASK);

	/* Information on BSSID descriptor passed to FW */
	dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID = %pM, SSID = %s\n",
				adhoc_join->bss_descriptor.bssid,
				adhoc_join->bss_descriptor.ssid);

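	/* Count the non-zero rates recorded in the scan response; the
	 * supported_rates array is zero-padded. */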
	for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
		    bss_desc->supported_rates[i]; i++)
		;
	rates_size = i;

	/* Copy Data Rates from the Rates recorded in scan response */
	memset(adhoc_join->bss_descriptor.data_rates, 0,
	       sizeof(adhoc_join->bss_descriptor.data_rates));
	memcpy(adhoc_join->bss_descriptor.data_rates,
	       bss_desc->supported_rates, rates_size);

	/* Copy the adhoc join rates into Current BSS state structure */
	priv->curr_bss_params.num_of_rates = rates_size;
	memcpy(&priv->curr_bss_params.data_rates, bss_desc->supported_rates,
	       rates_size);

	/* Copy the channel information */
	priv->curr_bss_params.bss_descriptor.channel = bss_desc->channel;
	priv->curr_bss_params.band = (u8) bss_desc->bss_band;

	if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED
	    || priv->sec_info.wpa_enabled)
		tmp_cap |= WLAN_CAPABILITY_PRIVACY;

	if (IS_SUPPORT_MULTI_BANDS(priv->adapter)) {
		/* Append a channel TLV */
		chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
		chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
		chan_tlv->header.len =
			cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));

		memset(chan_tlv->chan_scan_param, 0x00,
		       sizeof(struct mwifiex_chan_scan_param_set));
		chan_tlv->chan_scan_param[0].chan_number =
			(bss_desc->phy_param_set.ds_param_set.current_chan);
		dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan = %d\n",
		       chan_tlv->chan_scan_param[0].chan_number);

		chan_tlv->chan_scan_param[0].radio_type =
			mwifiex_band_to_radio_type((u8) bss_desc->bss_band);

		dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band = %d\n",
		       chan_tlv->chan_scan_param[0].radio_type);
		pos += sizeof(chan_tlv->header) +
			sizeof(struct mwifiex_chan_scan_param_set);
		cmd_append_size += sizeof(chan_tlv->header) +
			sizeof(struct mwifiex_chan_scan_param_set);
	}

	if (priv->sec_info.wpa_enabled)
		rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
	if (rsn_ie_len == -1)
		return -1;
	cmd_append_size += rsn_ie_len;

	if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
		cmd_append_size += mwifiex_cmd_append_11n_tlv(priv,
			bss_desc, &pos);

	/* Append vendor specific IE TLV */
	cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
			MWIFIEX_VSIE_MASK_ADHOC, &pos);

	cmd->size = cpu_to_le16((u16)
			    (sizeof(struct host_cmd_ds_802_11_ad_hoc_join)
			     + S_DS_GEN + cmd_append_size));

	adhoc_join->bss_descriptor.cap_info_bitmap = cpu_to_le16(tmp_cap);

	return 0;
}
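Every TLV appended above follows the same layout: a 16-bit type and a 16-bit length, both little-endian on the wire (hence the cpu_to_le16() calls on the header fields), followed by the payload. Roughly, as a sketch (the real definition lives in the mwifiex driver headers and uses __le16 fields):

struct sketch_mwifiex_tlv_header {
	unsigned short type;	/* e.g. TLV_TYPE_CHANLIST, stored little-endian */
	unsigned short len;	/* payload length in bytes, little-endian */
} __attribute__((packed));

Appending a TLV then amounts to casting pos to the TLV structure, filling the header and payload, and advancing pos by sizeof(header) plus the payload length — exactly the pattern the channel TLV code above follows, with cmd_append_size accumulating the total for the final cmd->size.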
Example #7
/*
 * This function prepares command for ad-hoc start.
 *
 * Driver will fill up SSID, BSS mode, IBSS parameters, physical
 * parameters, probe delay, and capability information. Firmware
 * will fill up beacon period, basic rates and operational rates.
 *
 * In addition, the following TLVs are added -
 *      - Channel TLV
 *      - Vendor specific IE
 *      - WPA/WPA2 IE
 *      - HT Capabilities IE
 *      - HT Information IE
 *
 * Preparation also includes -
 *      - Setting command ID and proper size
 *      - Ensuring correct endian-ness
 */
int
mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
				struct host_cmd_ds_command *cmd, void *data_buf)
{
	int rsn_ie_len = 0;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct host_cmd_ds_802_11_ad_hoc_start *adhoc_start =
		&cmd->params.adhoc_start;
	struct mwifiex_bssdescriptor *bss_desc;
	u32 cmd_append_size = 0;
	u32 i;
	u16 tmp_cap;
	uint16_t ht_cap_info;
	struct mwifiex_ie_types_chan_list_param_set *chan_tlv;

	struct mwifiex_ie_types_htcap *ht_cap;
	struct mwifiex_ie_types_htinfo *ht_info;
	u8 *pos = (u8 *) adhoc_start +
			sizeof(struct host_cmd_ds_802_11_ad_hoc_start);

	if (!adapter)
		return -1;

	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_START);

	bss_desc = &priv->curr_bss_params.bss_descriptor;
	priv->attempted_bss_desc = bss_desc;

	/*
	 * Fill in the parameters for 2 data structures:
	 *   1. struct host_cmd_ds_802_11_ad_hoc_start command
	 *   2. bss_desc
	 * Driver will fill up SSID, bss_mode,IBSS param, Physical Param,
	 * probe delay, and Cap info.
	 * Firmware will fill up beacon period, Basic rates
	 * and operational rates.
	 */

	memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);

	memcpy(adhoc_start->ssid,
	       ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
	       ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);

	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
				adhoc_start->ssid);

	memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
	memcpy(bss_desc->ssid.ssid,
	       ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
	       ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);

	bss_desc->ssid.ssid_len =
		((struct mwifiex_802_11_ssid *) data_buf)->ssid_len;

	/* Set the BSS mode */
	adhoc_start->bss_mode = HostCmd_BSS_MODE_IBSS;
	bss_desc->bss_mode = NL80211_IFTYPE_ADHOC;
	adhoc_start->beacon_period = cpu_to_le16(priv->beacon_period);
	bss_desc->beacon_period = priv->beacon_period;

	/* Set Physical param set */
/* Parameter IE Id */
#define DS_PARA_IE_ID   3
/* Parameter IE length */
#define DS_PARA_IE_LEN  1

	adhoc_start->phy_param_set.ds_param_set.element_id = DS_PARA_IE_ID;
	adhoc_start->phy_param_set.ds_param_set.len = DS_PARA_IE_LEN;

	if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
			(priv, adapter->adhoc_start_band, (u16)
				priv->adhoc_channel)) {
		struct mwifiex_chan_freq_power *cfp;
		cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
				adapter->adhoc_start_band, FIRST_VALID_CHANNEL);
		if (cfp)
			priv->adhoc_channel = (u8) cfp->channel;
	}

	if (!priv->adhoc_channel) {
		dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
		return -1;
	}

	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
				priv->adhoc_channel);

	priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
	priv->curr_bss_params.band = adapter->adhoc_start_band;

	bss_desc->channel = priv->adhoc_channel;
	adhoc_start->phy_param_set.ds_param_set.current_chan =
		priv->adhoc_channel;

	memcpy(&bss_desc->phy_param_set, &adhoc_start->phy_param_set,
	       sizeof(union ieee_types_phy_param_set));

	/* Set IBSS param set */
/* IBSS parameter IE Id */
#define IBSS_PARA_IE_ID   6
/* IBSS parameter IE length */
#define IBSS_PARA_IE_LEN  2

	adhoc_start->ss_param_set.ibss_param_set.element_id = IBSS_PARA_IE_ID;
	adhoc_start->ss_param_set.ibss_param_set.len = IBSS_PARA_IE_LEN;
	adhoc_start->ss_param_set.ibss_param_set.atim_window
		= cpu_to_le16(priv->atim_window);
	memcpy(&bss_desc->ss_param_set, &adhoc_start->ss_param_set,
	       sizeof(union ieee_types_ss_param_set));

	/* Set Capability info */
	bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_IBSS;
	tmp_cap = le16_to_cpu(adhoc_start->cap_info_bitmap);
	tmp_cap &= ~WLAN_CAPABILITY_ESS;
	tmp_cap |= WLAN_CAPABILITY_IBSS;

	/* Set up privacy in bss_desc */
	if (priv->sec_info.encryption_mode) {
		/* Ad-Hoc capability privacy on */
		dev_dbg(adapter->dev,
			"info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
		tmp_cap |= WLAN_CAPABILITY_PRIVACY;
	} else {
		dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set,"
				" setting privacy to ACCEPT ALL\n");
		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
	}

	memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
	mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
	if ((adapter->adhoc_start_band & BAND_G) &&
	    (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
		if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
					     HostCmd_ACT_GEN_SET, 0,
					     &priv->curr_pkt_filter)) {
			dev_err(adapter->dev,
			       "ADHOC_S_CMD: G Protection config failed\n");
			return -1;
		}
	}
	/* Count the non-zero rates (the array is zero-padded) */
	for (i = 0; i < sizeof(adhoc_start->DataRate) &&
		    adhoc_start->DataRate[i]; i++)
		;

	priv->curr_bss_params.num_of_rates = i;

	/* Copy the ad-hoc creating rates into Current BSS rate structure */
	memcpy(&priv->curr_bss_params.data_rates,
	       &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);

	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
	       adhoc_start->DataRate[0], adhoc_start->DataRate[1],
	       adhoc_start->DataRate[2], adhoc_start->DataRate[3]);

	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");

	if (IS_SUPPORT_MULTI_BANDS(adapter)) {
		/* Append a channel TLV */
		chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
		chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
		chan_tlv->header.len =
			cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));

		memset(chan_tlv->chan_scan_param, 0x00,
		       sizeof(struct mwifiex_chan_scan_param_set));
		chan_tlv->chan_scan_param[0].chan_number =
			(u8) priv->curr_bss_params.bss_descriptor.channel;

		dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
		       chan_tlv->chan_scan_param[0].chan_number);

		chan_tlv->chan_scan_param[0].radio_type
		       = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
		if (adapter->adhoc_start_band & BAND_GN
		    || adapter->adhoc_start_band & BAND_AN) {
			if (adapter->chan_offset == SEC_CHANNEL_ABOVE)
				chan_tlv->chan_scan_param[0].radio_type |=
					SECOND_CHANNEL_ABOVE;
			else if (adapter->chan_offset == SEC_CHANNEL_BELOW)
				chan_tlv->chan_scan_param[0].radio_type |=
					SECOND_CHANNEL_BELOW;
		}
		dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
		       chan_tlv->chan_scan_param[0].radio_type);
		pos += sizeof(chan_tlv->header) +
			sizeof(struct mwifiex_chan_scan_param_set);
		cmd_append_size +=
			sizeof(chan_tlv->header) +
			sizeof(struct mwifiex_chan_scan_param_set);
	}

	/* Append vendor specific IE TLV */
	cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
				MWIFIEX_VSIE_MASK_ADHOC, &pos);

	if (priv->sec_info.wpa_enabled) {
		rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
		if (rsn_ie_len == -1)
			return -1;
		cmd_append_size += rsn_ie_len;
	}

	if (adapter->adhoc_11n_enabled) {
		{
			ht_cap = (struct mwifiex_ie_types_htcap *) pos;
			memset(ht_cap, 0,
			       sizeof(struct mwifiex_ie_types_htcap));
			ht_cap->header.type =
				cpu_to_le16(WLAN_EID_HT_CAPABILITY);
			ht_cap->header.len =
			       cpu_to_le16(sizeof(struct ieee80211_ht_cap));
			ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);

			ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
			if (adapter->chan_offset) {
				ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
				ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
				ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
				SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
			}

			ht_cap->ht_cap.ampdu_params_info
					= IEEE80211_HT_MAX_AMPDU_64K;
			ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
			pos += sizeof(struct mwifiex_ie_types_htcap);
			cmd_append_size +=
				sizeof(struct mwifiex_ie_types_htcap);
		}
		{
			ht_info = (struct mwifiex_ie_types_htinfo *) pos;
			memset(ht_info, 0,
			       sizeof(struct mwifiex_ie_types_htinfo));
			ht_info->header.type =
				cpu_to_le16(WLAN_EID_HT_INFORMATION);
			ht_info->header.len =
				cpu_to_le16(sizeof(struct ieee80211_ht_info));
			ht_info->ht_info.control_chan =
				(u8) priv->curr_bss_params.bss_descriptor.
				channel;
			if (adapter->chan_offset) {
				ht_info->ht_info.ht_param =
					adapter->chan_offset;
				ht_info->ht_info.ht_param |=
					IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
			}
			ht_info->ht_info.operation_mode =
			     cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
			ht_info->ht_info.basic_set[0] = 0xff;
			pos += sizeof(struct mwifiex_ie_types_htinfo);
			cmd_append_size +=
				sizeof(struct mwifiex_ie_types_htinfo);
		}
	}

	cmd->size = cpu_to_le16((u16)
			    (sizeof(struct host_cmd_ds_802_11_ad_hoc_start)
			     + S_DS_GEN + cmd_append_size));

	if (adapter->adhoc_start_band == BAND_B)
		tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
	else
		tmp_cap |= WLAN_CAPABILITY_SHORT_SLOT_TIME;

	adhoc_start->cap_info_bitmap = cpu_to_le16(tmp_cap);

	return 0;
}
Example #8
static ssize_t wdm_write(struct file *file, const char __user *buffer,
			 size_t count, loff_t *ppos)
{
	u8 *buf;
	int rv = -EMSGSIZE, r, we;
	struct wdm_device *desc = file->private_data;
	struct usb_ctrlrequest *req;

	if (count > desc->wMaxCommand)
		count = desc->wMaxCommand;

	spin_lock_irq(&desc->iuspin);
	we = desc->werr;
	desc->werr = 0;
	spin_unlock_irq(&desc->iuspin);
	if (we < 0)
		return -EIO;

	r = mutex_lock_interruptible(&desc->wlock); /* concurrent writes */
	rv = -ERESTARTSYS;
	if (r)
		goto outnl;

	r = usb_autopm_get_interface(desc->intf);
	if (r < 0)
		goto outnp;

	if (!(file->f_flags & O_NONBLOCK))
		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
								&desc->flags));
	else
		if (test_bit(WDM_IN_USE, &desc->flags))
			r = -EAGAIN;
	if (r < 0)
		goto out;

	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		rv = -ENODEV;
		goto out;
	}

	desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
	if (!buf) {
		rv = -ENOMEM;
		goto out;
	}

	r = copy_from_user(buf, buffer, count);
	if (r > 0) {
		kfree(buf);
		rv = -EFAULT;
		goto out;
	}

	req = desc->orq;
	usb_fill_control_urb(
		desc->command,
		interface_to_usbdev(desc->intf),
		/* using common endpoint 0 */
		usb_sndctrlpipe(interface_to_usbdev(desc->intf), 0),
		(unsigned char *)req,
		buf,
		count,
		wdm_out_callback,
		desc
	);

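	/* USB control-request fields are little-endian on the wire;
	 * wLength is 16 bits wide, hence the cpu_to_le16() below, while
	 * bRequestType and bRequest are single bytes. */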
	req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
			     USB_RECIP_INTERFACE);
	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	req->wValue = 0;
	req->wIndex = desc->inum;
	req->wLength = cpu_to_le16(count);
	set_bit(WDM_IN_USE, &desc->flags);

	rv = usb_submit_urb(desc->command, GFP_KERNEL);
	if (rv < 0) {
		kfree(buf);
		clear_bit(WDM_IN_USE, &desc->flags);
		dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
	} else {
		dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
			req->wIndex);
	}
out:
	usb_autopm_put_interface(desc->intf);
outnp:
	mutex_unlock(&desc->wlock);
outnl:
	return rv < 0 ? rv : count;
}
Example #9
static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	int rv = -EINVAL;
	struct usb_device *udev = interface_to_usbdev(intf);
	struct wdm_device *desc;
	struct usb_host_interface *iface;
	struct usb_endpoint_descriptor *ep;
	struct usb_cdc_dmm_desc *dmhd;
	u8 *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	u16 maxcom = 0;

	if (!buffer)
		goto out;

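	/* Walk the class-specific descriptors appended to the interface:
	 * buffer[0] is bLength, buffer[1] is bDescriptorType and buffer[2]
	 * is the CDC functional descriptor subtype. */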
	while (buflen > 2) {
		if (buffer[1] != USB_DT_CS_INTERFACE) {
			dev_err(&intf->dev, "skipping garbage\n");
			goto next_desc;
		}

		switch (buffer[2]) {
		case USB_CDC_HEADER_TYPE:
			break;
		case USB_CDC_DMM_TYPE:
			dmhd = (struct usb_cdc_dmm_desc *)buffer;
			maxcom = le16_to_cpu(dmhd->wMaxCommand);
			dev_dbg(&intf->dev,
				"Finding maximum buffer length: %d", maxcom);
			break;
		default:
			dev_err(&intf->dev,
				"Ignoring extra header, type %d, length %d\n",
				buffer[2], buffer[0]);
			break;
		}
next_desc:
		buflen -= buffer[0];
		buffer += buffer[0];
	}

	rv = -ENOMEM;
	desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
	if (!desc)
		goto out;
	mutex_init(&desc->wlock);
	mutex_init(&desc->rlock);
	mutex_init(&desc->plock);
	spin_lock_init(&desc->iuspin);
	init_waitqueue_head(&desc->wait);
	desc->wMaxCommand = maxcom;
	/* stored in wire (little-endian) format; used directly in control requests */
	desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber);
	desc->intf = intf;
	INIT_WORK(&desc->rxwork, wdm_rxwork);

	rv = -EINVAL;
	iface = intf->cur_altsetting;
	if (iface->desc.bNumEndpoints != 1)
		goto err;
	ep = &iface->endpoint[0].desc;
	if (!ep || !usb_endpoint_is_int_in(ep))
		goto err;

	desc->wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);
	desc->bMaxPacketSize0 = udev->descriptor.bMaxPacketSize0;

	desc->orq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!desc->orq)
		goto err;
	desc->irq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!desc->irq)
		goto err;

	desc->validity = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->validity)
		goto err;

	desc->response = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->response)
		goto err;

	desc->command = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->command)
		goto err;

	desc->ubuf = kmalloc(desc->wMaxCommand, GFP_KERNEL);
	if (!desc->ubuf)
		goto err;

	desc->sbuf = usb_buffer_alloc(interface_to_usbdev(intf),
					desc->wMaxPacketSize,
					GFP_KERNEL,
					&desc->validity->transfer_dma);
	if (!desc->sbuf)
		goto err;

	desc->inbuf = usb_buffer_alloc(interface_to_usbdev(intf),
					desc->bMaxPacketSize0,
					GFP_KERNEL,
					&desc->response->transfer_dma);
	if (!desc->inbuf)
		goto err2;

	usb_fill_int_urb(
		desc->validity,
		interface_to_usbdev(intf),
		usb_rcvintpipe(interface_to_usbdev(intf), ep->bEndpointAddress),
		desc->sbuf,
		desc->wMaxPacketSize,
		wdm_int_callback,
		desc,
		ep->bInterval
	);
	desc->validity->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_set_intfdata(intf, desc);
	rv = usb_register_dev(intf, &wdm_class);
	if (rv < 0)
		goto err3;
	else
		dev_info(&intf->dev, "cdc-wdm%d: USB WDM device\n",
			intf->minor - WDM_MINOR_BASE);
out:
	return rv;
err3:
	usb_set_intfdata(intf, NULL);
	usb_buffer_free(interface_to_usbdev(desc->intf),
			desc->bMaxPacketSize0,
			desc->inbuf,
			desc->response->transfer_dma);
err2:
	usb_buffer_free(interface_to_usbdev(desc->intf),
			desc->wMaxPacketSize,
			desc->sbuf,
			desc->validity->transfer_dma);
err:
	free_urbs(desc);
	kfree(desc->ubuf);
	kfree(desc->orq);
	kfree(desc->irq);
	kfree(desc);
	return rv;
}
Example #10
File: scan.c Project: ManCheol/kernel
	int ret;
	u32 status;
	int ssid_len = 0;
	u8 *ssid = NULL;

	lockdep_assert_held(&mvm->mutex);
	BUG_ON(mvm->scan_cmd == NULL);

	IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
	mvm->scan_status = IWL_MVM_SCAN_OS;
	memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
	       mvm->fw->ucode_capa.max_probe_length +
	       (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));

	cmd->channel_count = (u8)req->n_channels;
	cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
	cmd->max_out_time = iwl_mvm_scan_max_out_time(vif);
	cmd->suspend_time = iwl_mvm_scan_suspend_time(vif);
	cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
		cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
	else
		cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);

	cmd->repeats = cpu_to_le32(1);
Example #11
static void wdm_int_callback(struct urb *urb)
{
	int rv = 0;
	int status = urb->status;
	struct wdm_device *desc;
	struct usb_ctrlrequest *req;
	struct usb_cdc_notification *dr;

	desc = urb->context;
	req = desc->irq;
	dr = (struct usb_cdc_notification *)desc->sbuf;

	if (status) {
		switch (status) {
		case -ESHUTDOWN:
		case -ENOENT:
		case -ECONNRESET:
			return; /* unplug */
		case -EPIPE:
			set_bit(WDM_INT_STALL, &desc->flags);
			dev_err(&desc->intf->dev, "Stall on int endpoint\n");
			goto sw; /* halt is cleared in work */
		default:
			dev_err(&desc->intf->dev,
				"nonzero urb status received: %d\n", status);
			break;
		}
	}

	if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
		dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
			urb->actual_length);
		goto exit;
	}

	switch (dr->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev_dbg(&desc->intf->dev,
			"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
			dr->wIndex, dr->wLength);
		break;

	case USB_CDC_NOTIFY_NETWORK_CONNECTION:

		dev_dbg(&desc->intf->dev,
			"NOTIFY_NETWORK_CONNECTION %s network",
			dr->wValue ? "connected to" : "disconnected from");
		goto exit;
	default:
		clear_bit(WDM_POLL_RUNNING, &desc->flags);
		dev_err(&desc->intf->dev,
			"unknown notification %d received: index %d len %d\n",
			dr->bNotificationType, dr->wIndex, dr->wLength);
		goto exit;
	}

	req->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	req->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	req->wValue = 0;
	req->wIndex = desc->inum;
	req->wLength = cpu_to_le16(desc->wMaxCommand);

	usb_fill_control_urb(
		desc->response,
		interface_to_usbdev(desc->intf),
		/* using common endpoint 0 */
		usb_rcvctrlpipe(interface_to_usbdev(desc->intf), 0),
		(unsigned char *)req,
		desc->inbuf,
		desc->wMaxCommand,
		wdm_in_callback,
		desc
	);
	desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	spin_lock(&desc->iuspin);
	clear_bit(WDM_READ, &desc->flags);
	if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
		rv = usb_submit_urb(desc->response, GFP_ATOMIC);
		dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
			__func__, rv);
	}
	spin_unlock(&desc->iuspin);
	if (rv < 0) {
		if (rv == -EPERM)
			return;
		if (rv == -ENOMEM) {
sw:
			rv = schedule_work(&desc->rxwork);
			if (rv)
				dev_err(&desc->intf->dev,
					"Cannot schedule work\n");
		}
	}
exit:
	rv = usb_submit_urb(urb, GFP_ATOMIC);
	if (rv)
		dev_err(&desc->intf->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, rv);

}
Example #12
/*
 * Create and transmit a management frame using "operation" and "oid",
 * with arguments data/length.
 * We either return an error and free the frame, or we return 0 and
 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
 * interrupt.
 */
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
		    void *data, int length)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	void *p;
	int err = -EINVAL;
	unsigned long flags;
	isl38xx_fragment *frag;
	struct islpci_membuf buf;
	u32 curr_frag;
	int index;
	int frag_len = length + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif

	if (frag_len > MGMT_FRAME_SIZE) {
		printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
		       ndev->name, frag_len);
		goto error;
	}

	err = -ENOMEM;
	p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
	if (!buf.mem)
		goto error;

	buf.size = frag_len;

	/* create the header directly in the fragment data area */
	pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
	p += PIMFOR_HEADER_SIZE;

	if (data)
		memcpy(p, data, length);
	else
		memset(p, 0, length);

#if VERBOSE > SHOW_ERROR_MESSAGES
	{
		pimfor_header_t *h = buf.mem;
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
		      h->operation, oid, h->device_id, h->flags, length);

		/* display the buffer contents for debugging */
		display_buffer((char *) h, sizeof (pimfor_header_t));
		display_buffer(p, length);
	}
#endif

	err = -ENOMEM;
	buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
				      PCI_DMA_TODEVICE);
	if (!buf.pci_addr) {
		printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
		       ndev->name);
		goto error_free;
	}

	/* Protect the control block modifications against interrupts. */
	spin_lock_irqsave(&priv->slock, flags);
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
		printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
		       ndev->name);
		goto error_unlock;
	}

	/* commit the frame to the tx device queue */
	index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
	priv->mgmt_tx[index] = buf;
	frag = &cb->tx_data_mgmt[index];
	frag->size = cpu_to_le16(frag_len);
	frag->flags = 0;	/* for any other than the last fragment, set to 1 */
	frag->address = cpu_to_le32(buf.pci_addr);

	/* The fragment address in the control block must have been
	 * written before announcing the frame buffer to the device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
	spin_unlock_irqrestore(&priv->slock, flags);

	/* trigger the device */
	islpci_trigger(priv);
	return 0;

      error_unlock:
	spin_unlock_irqrestore(&priv->slock, flags);
      error_free:
	kfree(buf.mem);
      error:
	return err;
}
Example #13
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode/inode tables are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
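	/* bg_flags is stored little-endian on disk; converting the constant
	 * with cpu_to_le16() lets the test run on the raw field instead of
	 * doing le16_to_cpu() on every access. */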
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read. We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * the file system was mounted not to panic on errors;
	 * continue with the corrupt bitmap
	 */
	return bh;
}
Example #14
void ixp425_outsw(u32 p, u16 *addr, u32 count)
{
	while (count--)
		outw(cpu_to_le16(*addr++), p);
}
Example #15
static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr;
	u32 hwaddr_bottom;
	u16 hwaddr_top;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < CFG_MACB_RX_RING_SIZE; i++) {
		if (i == (CFG_MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	for (i = 0; i < CFG_MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (CFG_MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb->rx_tail = macb->tx_head = macb->tx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	/* set hardware address */
	hwaddr_bottom = cpu_to_le32(*((u32 *)netdev->enetaddr));
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = cpu_to_le16(*((u16 *)(netdev->enetaddr + 4)));
	macb_writel(macb, SA1T, hwaddr_top);
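	/* SA1B takes the first four bytes of the MAC address and SA1T the
	 * last two; the cpu_to_le32()/cpu_to_le16() conversions above keep
	 * the first address byte in the least significant byte of the
	 * register value on big-endian CPUs as well. */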

	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#if defined(CONFIG_AVR32)
	macb_writel(macb, USRIO, 0);
#else
	macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#endif
#else
#if defined(CONFIG_AVR32)
	macb_writel(macb, USRIO, MACB_BIT(MII));
#else
	macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#endif
#endif /* CONFIG_RMII */

	if (!macb_phy_init(macb))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}
Example #16
/* hard_start_xmit function for master radio interface wifi#.
 * AP processing (TX rate control, power save buffering, etc.).
 * Use hardware TX function to send the frame. */
int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int ret = NETDEV_TX_BUSY;
	u16 fc;
	struct hostap_tx_data tx;
	ap_tx_ret tx_ret;
	struct hostap_skb_tx_data *meta;
	int no_encrypt = 0;
	struct ieee80211_hdr *hdr;

	iface = netdev_priv(dev);
	local = iface->local;

	tx.skb = skb;
	tx.sta_ptr = NULL;

	meta = (struct hostap_skb_tx_data *) skb->cb;
	if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
		printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
		       "expected 0x%08x)\n",
		       dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC);
		ret = 0;
		iface->stats.tx_dropped++;
		goto fail;
	}

	if (local->host_encrypt) {
		/* Set crypt to default algorithm and key; will be replaced in
		 * AP code if STA has own alg/key */
		tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx];
		tx.host_encrypt = 1;
	} else {
		tx.crypt = NULL;
		tx.host_encrypt = 0;
	}

	if (skb->len < 24) {
		printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		ret = 0;
		iface->stats.tx_dropped++;
		goto fail;
	}

	/* FIX (?):
	 * The Wi-Fi 802.11b test plan suggests that the AP should ignore the
	 * power save bit in authentication and (re)association frames and
	 * assume that the STA remains awake for the response. */
	tx_ret = hostap_handle_sta_tx(local, &tx);
	skb = tx.skb;
	meta = (struct hostap_skb_tx_data *) skb->cb;
	hdr = (struct ieee80211_hdr *) skb->data;
	fc = le16_to_cpu(hdr->frame_control);
	switch (tx_ret) {
	case AP_TX_CONTINUE:
		break;
	case AP_TX_CONTINUE_NOT_AUTHORIZED:
		if (local->ieee_802_1x &&
		    ieee80211_is_data(hdr->frame_control) &&
		    meta->ethertype != ETH_P_PAE &&
		    !(meta->flags & HOSTAP_TX_FLAGS_WDS)) {
			printk(KERN_DEBUG "%s: dropped frame to unauthorized "
			       "port (IEEE 802.1X): ethertype=0x%04x\n",
			       dev->name, meta->ethertype);
			hostap_dump_tx_80211(dev->name, skb);

			ret = 0; /* drop packet */
			iface->stats.tx_dropped++;
			goto fail;
		}
		break;
	case AP_TX_DROP:
		ret = 0; /* drop packet */
		iface->stats.tx_dropped++;
		goto fail;
	case AP_TX_RETRY:
		goto fail;
	case AP_TX_BUFFERED:
		/* do not free skb here, it will be freed when the
		 * buffered frame is sent/timed out */
		ret = 0;
		goto tx_exit;
	}

	/* Request TX callback if protocol version is 2 in 802.11 header;
	 * this version 2 is a special case used between hostapd and kernel
	 * driver */
	if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) &&
	    local->ap && local->ap->tx_callback_idx && meta->tx_cb_idx == 0) {
		meta->tx_cb_idx = local->ap->tx_callback_idx;

		/* remove special version from the frame header */
		fc &= ~IEEE80211_FCTL_VERS;
		hdr->frame_control = cpu_to_le16(fc);
	}

	if (!ieee80211_is_data(hdr->frame_control)) {
		no_encrypt = 1;
		tx.crypt = NULL;
	}

	if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt &&
	    !(fc & IEEE80211_FCTL_PROTECTED)) {
		no_encrypt = 1;
		PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing "
		       "unencrypted EAPOL frame\n", dev->name);
		tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */
	}

	if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu))
		tx.crypt = NULL;
	else if ((tx.crypt ||
		 local->crypt_info.crypt[local->crypt_info.tx_keyidx]) &&
		 !no_encrypt) {
		/* Add ISWEP flag both for firmware and host based encryption
		 */
		fc |= IEEE80211_FCTL_PROTECTED;
		hdr->frame_control = cpu_to_le16(fc);
	} else if (local->drop_unencrypted &&
		   ieee80211_is_data(hdr->frame_control) &&
		   meta->ethertype != ETH_P_PAE) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: dropped unencrypted TX data "
			       "frame (drop_unencrypted=1)\n", dev->name);
		}
		iface->stats.tx_dropped++;
		ret = 0;
		goto fail;
	}

	if (tx.crypt) {
		skb = hostap_tx_encrypt(skb, tx.crypt);
		if (skb == NULL) {
			printk(KERN_DEBUG "%s: TX - encryption failed\n",
			       dev->name);
			ret = 0;
			goto fail;
		}
		meta = (struct hostap_skb_tx_data *) skb->cb;
		if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
			printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
			       "expected 0x%08x) after hostap_tx_encrypt\n",
			       dev->name, meta->magic,
			       HOSTAP_SKB_TX_DATA_MAGIC);
			ret = 0;
			iface->stats.tx_dropped++;
			goto fail;
		}
	}

	if (local->func->tx == NULL || local->func->tx(skb, dev)) {
		ret = 0;
		iface->stats.tx_dropped++;
	} else {
		ret = 0;
		iface->stats.tx_packets++;
		iface->stats.tx_bytes += skb->len;
	}

 fail:
	if (!ret && skb)
		dev_kfree_skb(skb);
 tx_exit:
	if (tx.sta_ptr)
		hostap_handle_sta_release(tx.sta_ptr);
	return ret;
}
Example #17
static int fastboot_bind(struct usb_gadget *gadget)
{
	struct fastboot_dev *dev = &l_fbdev;
	u8 cdc = 1, zlp = 1;
	struct usb_ep *in_ep, *out_ep;
	int gcnum;
	u8 tmp[7];

	debug("%s controller :%s recognized\n", __func__, gadget->name);
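	/* Device descriptor fields such as bcdDevice are little-endian per
	 * the USB spec, hence the cpu_to_le16() conversions below. */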
	gcnum = usb_gadget_controller_number(gadget);
	if (gcnum >= 0)
		device_desc.bcdDevice = cpu_to_le16(0x0300 + gcnum);
	else {
		/*
		 * Can't assume CDC works. We don't want to default to
		 * anything less functional on CDC-capable hardware,
		 * so we fail in this case.
		 */
		error("controller '%s' not recognized", gadget->name);
		return -ENODEV;
	}

	if (bcdDevice)
		device_desc.bcdDevice = cpu_to_le16(bcdDevice);
	if (iManufacturer)
		strlcpy(manufacturer, iManufacturer, sizeof manufacturer);
	if (iProduct)
		strlcpy(product_desc, iProduct, sizeof product_desc);

	iSerialNumber = get_product_sn();
	device_desc.iSerialNumber = STRING_SERIALNUMBER;
	strlcpy(serial_number, iSerialNumber, sizeof serial_number);

	/* all we really need is bulk IN/OUT */
	usb_ep_autoconfig_reset(gadget);
	in_ep = usb_ep_autoconfig(gadget, &fs_source_desc);
	if (!in_ep) {
autoconf_fail:
		error("can't autoconfigure on %s\n", gadget->name);
		return -ENODEV;
	}
	in_ep->driver_data = in_ep;	/* claim */

	out_ep = usb_ep_autoconfig(gadget, &fs_sink_desc);
	if (!out_ep)
		goto autoconf_fail;
	out_ep->driver_data = out_ep;	/* claim */

	device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
	usb_gadget_set_selfpowered(gadget);

	if (gadget_is_dualspeed(gadget)) {

		/* assumes ep0 uses the same value for both speeds ... */
		dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;

		/* and that all endpoints are dual-speed */
		hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
		hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
	}

	dev->network_started = 0;
	dev->in_ep = in_ep;
	dev->out_ep = out_ep;

	/* preallocate control message data and buffer */
	dev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!dev->req)
		goto fail;
	dev->req->buf = control_req;
	dev->req->complete = fastboot_setup_complete;

	/* ... and maybe likewise for status transfer */

	/* finish hookup to lower layer ... */
	dev->gadget = gadget;
	set_gadget_data(gadget, dev);
	gadget->ep0->driver_data = dev;

	debug("bind controller with the driver\n");
	/*
	 * two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	return 0;

fail:
	error("%s failed", __func__);
	fastboot_unbind(gadget);
	return -ENOMEM;
}
Example #18
/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
 * device configuration. */
int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return 0;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return 0;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
				  ETH_ALEN) != 0) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on the mode used; includes dst_addr
	 * and src_addr), possible encapsulation (RFC1042/Bridge-Tunnel),
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
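	/* Ethertypes >= 0x600 mark Ethernet II frames and get RFC 1042
	 * (SNAP) encapsulation; AARP and IPX use the bridge-tunnel header
	 * instead. The two proto bytes stay in the frame, which is why two
	 * bytes less are skipped below. */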
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS are
		 * using incompatible format: FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed, that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
			 */

			/* SA from skb->data + ETH_ALEN will be added after
			 * frame payload; use hdr.addr4 as a temporary buffer
			 */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    skb->data[0] & 0x01)
			memset(&hdr.addr1, 0xff, ETH_ALEN);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
			       ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_control = cpu_to_le16(fc);

	skb_pull(skb, skip_header_bytes);
	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return 0;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME) {
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return 0;
}
Example #19
/*
 * This function prepares command for association.
 *
 * This sets the following parameters -
 *      - Peer MAC address
 *      - Listen interval
 *      - Beacon interval
 *      - Capability information
 *
 * ...and the following TLVs, as required -
 *      - SSID TLV
 *      - PHY TLV
 *      - SS TLV
 *      - Rates TLV
 *      - Authentication TLV
 *      - Channel TLV
 *      - WPA/WPA2 IE
 *      - 11n TLV
 *      - Vendor specific TLV
 *      - WMM TLV
 *      - WAPI IE
 *      - Generic IE
 *      - TSF TLV
 *
 * Preparation also includes -
 *      - Setting command ID and proper size
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
				 struct host_cmd_ds_command *cmd,
				 void *data_buf)
{
	struct host_cmd_ds_802_11_associate *assoc = &cmd->params.associate;
	struct mwifiex_bssdescriptor *bss_desc;
	struct mwifiex_ie_types_ssid_param_set *ssid_tlv;
	struct mwifiex_ie_types_phy_param_set *phy_tlv;
	struct mwifiex_ie_types_ss_param_set *ss_tlv;
	struct mwifiex_ie_types_rates_param_set *rates_tlv;
	struct mwifiex_ie_types_auth_type *auth_tlv;
	struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
	u8 rates[MWIFIEX_SUPPORTED_RATES];
	u32 rates_size;
	u16 tmp_cap;
	u8 *pos;
	int rsn_ie_len = 0;

	bss_desc = (struct mwifiex_bssdescriptor *) data_buf;
	pos = (u8 *) assoc;

	mwifiex_cfg_tx_buf(priv, bss_desc);

	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE);

	/* Save so we know which BSS Desc to use in the response handler */
	priv->attempted_bss_desc = bss_desc;

	memcpy(assoc->peer_sta_addr,
	       bss_desc->mac_address, sizeof(assoc->peer_sta_addr));
	pos += sizeof(assoc->peer_sta_addr);

	/* Set the listen interval */
	assoc->listen_interval = cpu_to_le16(priv->listen_interval);
	/* Set the beacon period */
	assoc->beacon_period = cpu_to_le16(bss_desc->beacon_period);

	pos += sizeof(assoc->cap_info_bitmap);
	pos += sizeof(assoc->listen_interval);
	pos += sizeof(assoc->beacon_period);
	pos += sizeof(assoc->dtim_period);

	ssid_tlv = (struct mwifiex_ie_types_ssid_param_set *) pos;
	ssid_tlv->header.type = cpu_to_le16(WLAN_EID_SSID);
	ssid_tlv->header.len = cpu_to_le16((u16) bss_desc->ssid.ssid_len);
	memcpy(ssid_tlv->ssid, bss_desc->ssid.ssid,
		le16_to_cpu(ssid_tlv->header.len));
	pos += sizeof(ssid_tlv->header) + le16_to_cpu(ssid_tlv->header.len);

	phy_tlv = (struct mwifiex_ie_types_phy_param_set *) pos;
	phy_tlv->header.type = cpu_to_le16(WLAN_EID_DS_PARAMS);
	phy_tlv->header.len = cpu_to_le16(sizeof(phy_tlv->fh_ds.ds_param_set));
	memcpy(&phy_tlv->fh_ds.ds_param_set,
	       &bss_desc->phy_param_set.ds_param_set.current_chan,
	       sizeof(phy_tlv->fh_ds.ds_param_set));
	pos += sizeof(phy_tlv->header) + le16_to_cpu(phy_tlv->header.len);

	ss_tlv = (struct mwifiex_ie_types_ss_param_set *) pos;
	ss_tlv->header.type = cpu_to_le16(WLAN_EID_CF_PARAMS);
	ss_tlv->header.len = cpu_to_le16(sizeof(ss_tlv->cf_ibss.cf_param_set));
	pos += sizeof(ss_tlv->header) + le16_to_cpu(ss_tlv->header.len);

	/* Get the common rates supported between the driver and the BSS Desc */
	if (mwifiex_setup_rates_from_bssdesc
	    (priv, bss_desc, rates, &rates_size))
		return -1;

	/* Save the data rates into Current BSS state structure */
	priv->curr_bss_params.num_of_rates = rates_size;
	memcpy(&priv->curr_bss_params.data_rates, rates, rates_size);

	/* Setup the Rates TLV in the association command */
	rates_tlv = (struct mwifiex_ie_types_rates_param_set *) pos;
	rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
	rates_tlv->header.len = cpu_to_le16((u16) rates_size);
	memcpy(rates_tlv->rates, rates, rates_size);
	pos += sizeof(rates_tlv->header) + rates_size;
	dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
					rates_size);

	/* Add the Authentication type to be used for Auth frames */
	auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
	auth_tlv->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
	auth_tlv->header.len = cpu_to_le16(sizeof(auth_tlv->auth_type));
	if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
		auth_tlv->auth_type = cpu_to_le16(
				(u16) priv->sec_info.authentication_mode);
	else
		auth_tlv->auth_type = cpu_to_le16(NL80211_AUTHTYPE_OPEN_SYSTEM);

	pos += sizeof(auth_tlv->header) + le16_to_cpu(auth_tlv->header.len);

	if (IS_SUPPORT_MULTI_BANDS(priv->adapter) &&
	    !(ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
	      !bss_desc->disable_11n &&
	      (priv->adapter->config_bands & BAND_GN ||
	       priv->adapter->config_bands & BAND_AN) &&
	      bss_desc->bcn_ht_cap)) {
		/* Append a channel TLV for the channel the attempted AP was
		   found on */
		chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
		chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
		chan_tlv->header.len =
			cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));

		memset(chan_tlv->chan_scan_param, 0x00,
		       sizeof(struct mwifiex_chan_scan_param_set));
		chan_tlv->chan_scan_param[0].chan_number =
			(bss_desc->phy_param_set.ds_param_set.current_chan);
		dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
		       chan_tlv->chan_scan_param[0].chan_number);

		chan_tlv->chan_scan_param[0].radio_type =
			mwifiex_band_to_radio_type((u8) bss_desc->bss_band);

		dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
		       chan_tlv->chan_scan_param[0].radio_type);
		pos += sizeof(chan_tlv->header) +
			sizeof(struct mwifiex_chan_scan_param_set);
	}

	if (!priv->wps.session_enable) {
		if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
			rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);

		if (rsn_ie_len == -1)
			return -1;
	}

	if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
		&& (!bss_desc->disable_11n)
	    && (priv->adapter->config_bands & BAND_GN
		|| priv->adapter->config_bands & BAND_AN))
		mwifiex_cmd_append_11n_tlv(priv, bss_desc, &pos);

	/* Append vendor specific IE TLV */
	mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_ASSOC, &pos);

	mwifiex_wmm_process_association_req(priv, &pos, &bss_desc->wmm_ie,
					    bss_desc->bcn_ht_cap);
	if (priv->sec_info.wapi_enabled && priv->wapi_ie_len)
		mwifiex_cmd_append_wapi_ie(priv, &pos);

	mwifiex_cmd_append_generic_ie(priv, &pos);

	mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc);

	cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN);

	/* Set the Capability info last */
	tmp_cap = bss_desc->cap_info_bitmap;

	if (priv->adapter->config_bands == BAND_B)
		tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;

	tmp_cap &= CAPINFO_MASK;
	dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
	       tmp_cap, CAPINFO_MASK);
	assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);

	return 0;
}
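
The function above repeats one TLV-append pattern for every element: cast the cursor, fill a little-endian header with cpu_to_le16(), copy the payload, then advance pos by header size plus payload length, so the final command size falls out of pointer arithmetic. A minimal sketch of that pattern follows; the tlv_header layout and append_tlv() helper are illustrative, not part of the mwifiex driver.

struct tlv_header {
	__le16 type;
	__le16 len;
} __packed;

/* Append one TLV at *pos and advance the cursor past it. */
static void append_tlv(u8 **pos, u16 type, const void *payload, u16 len)
{
	struct tlv_header *hdr = (struct tlv_header *)*pos;

	hdr->type = cpu_to_le16(type);	/* header goes out little-endian */
	hdr->len = cpu_to_le16(len);
	memcpy(*pos + sizeof(*hdr), payload, len);
	*pos += sizeof(*hdr) + len;	/* caller derives cmd->size from pos */
}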
Example #20
int main(int argc, char **argv)
{
	struct usb_vendprod vendprod;
	struct dfu_if _rt_dif, _dif, *dif = &_dif;
	int num_devs;
	int num_ifs;
	unsigned int transfer_size = 0;
	enum mode mode = MODE_NONE;
	struct dfu_status status;
	int quirks_auto_detect = 1;
	dfu_quirks manual_quirks;
	dfu_handle handle;
	char *filename = NULL;
	char *alt_name = NULL; /* query alt name if non-NULL */
	char *end;
	int final_reset = 0;
	int page_size = getpagesize();
	int ret;
	
	printf("dfu-util - (C) 2007-2008 by OpenMoko Inc.\n"
	       "This program is Free Software and has ABSOLUTELY NO WARRANTY\n\n");

	dfu_quirks_clear(&manual_quirks);

	memset(dif, 0, sizeof(*dif));

	usb_init();
	//usb_set_debug(255);
	usb_find_busses();
	usb_find_devices();

	while (1) {
		int c, option_index = 0;
		c = getopt_long(argc, argv, "hVvld:p:c:i:a:t:U:D:C:S:RQNq:", opts,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'h':
			help();
			exit(0);
			break;
		case 'V':
			print_version();
			exit(0);
			break;
		case 'v':
			verbose = 1;
			break;
		case 'l':
			list_dfu_interfaces();
			exit(0);
			break;
		case 'd':
			/* Parse device */
			if (parse_vendprod(&vendprod, optarg) < 0) {
				fprintf(stderr, "unable to parse `%s'\n", optarg);
				exit(2);
			}
			dif->vendor = vendprod.vendor;
			dif->product = vendprod.product;
			dif->flags |= (DFU_IFF_VENDOR | DFU_IFF_PRODUCT);
			break;
		case 'p':
			/* Parse device path */
			dif->path = optarg;
			dif->flags |= DFU_IFF_PATH;
			ret = resolve_device_path(dif);
			if (ret < 0) {
				fprintf(stderr, "unable to parse `%s'\n",
				    optarg);
				exit(2);
			}
			if (!ret) {
				fprintf(stderr, "cannot find `%s'\n", optarg);
				exit(1);
			}
			break;
		case 'c':
			/* Configuration */
			dif->configuration = atoi(optarg);
			dif->flags |= DFU_IFF_CONFIG;
			break;
		case 'i':
			/* Interface */
			dif->interface = atoi(optarg);
			dif->flags |= DFU_IFF_IFACE;
			break;
		case 'a':
			/* Interface Alternate Setting */
			dif->altsetting = strtoul(optarg, &end, 0);
			if (*end)
				alt_name = optarg;
			dif->flags |= DFU_IFF_ALT;
			break;
		case 't':
			transfer_size = atoi(optarg);
			break;
		case 'U':
			mode = MODE_UPLOAD;
			filename = optarg;
			break;
		case 'D':
			mode = MODE_DOWNLOAD;
			filename = optarg;
			break;
		case 'C':
			mode = MODE_COMPARE;
			/* TODO: verify firmware */
			filename = optarg;
			break;
		case 'S':
			filename = optarg;
			add_file_suffix(filename);
			exit(0);
			break;
		case 'R':
			final_reset = 1;
			break;

		case 'Q':
			dfu_quirks_print();
			exit(0);
			break;
		case 'N':
			quirks_auto_detect = 0;
			break;
		case 'q':
			quirks_auto_detect = 0;
			dfu_quirk_set(&manual_quirks, atoi(optarg));
			break;

		default:
			help();
			exit(2);
		}
	}

	if (mode == MODE_NONE) {
		fprintf(stderr, "You need to specify one of -D or -U\n");
		help();
		exit(2);
	}

	if (!filename) {
		fprintf(stderr, "You need to specify a filename to -D -r -U\n");
		help();
		exit(2);
	}

	dfu_init(&handle, 5000);

	num_devs = count_dfu_devices(dif);
	if (num_devs == 0) {
		fprintf(stderr, "No DFU capable USB device found\n");
		exit(1);
	} else if (num_devs > 1) {
		/* We cannot safely support more than one DFU capable device
		 * with same vendor/product ID, since during DFU we need to do
		 * a USB bus reset, after which the target device will get a
		 * new address */
		fprintf(stderr, "More than one DFU capable USB device found, "
		       "you might try `--list' and then disconnect all but one "
		       "device\n");
		exit(3);
	}
	if (!get_first_dfu_device(dif))
		exit(3);

	/* We have exactly one device. Its usb_device is now in dif->dev */

	printf("Opening USB Device 0x%04x:0x%04x...\n", dif->vendor, dif->product);
	dif->dev_handle = usb_open(dif->dev);
	if (!dif->dev_handle) {
		fprintf(stderr, "Cannot open device: %s\n", usb_strerror());
		exit(1);
	}

	/* try to find first DFU interface of device */
	memcpy(&_rt_dif, dif, sizeof(_rt_dif));
	if (!get_first_dfu_if(&_rt_dif))
		exit(1);

	handle.device = _rt_dif.dev_handle;
	handle.interface = _rt_dif.interface;

	/* automatic quirk detection */
	if(quirks_auto_detect)
	{
		/* TODO: let the detection be influenced by bcdDFU, bcdDevice */
		handle.quirk_flags = dfu_quirks_detect(0, dif->vendor, dif->product, 0);
	}
	/* merge with manual quirks */
	dfu_quirks_insert(&handle.quirk_flags, &manual_quirks);
	if(!dfu_quirks_is_empty(&handle.quirk_flags))
	{
		printf("Selected quirks: ");
		dfu_quirks_print_set(&handle.quirk_flags);
		printf("\n");
	}

	if (!_rt_dif.flags & DFU_IFF_DFU) {
		/* In the 'first round' during runtime mode, there can only be one
	 	* DFU Interface descriptor according to the DFU Spec. */

		/* FIXME: check if the selected device really has only one */

		printf("Claiming USB DFU Runtime Interface %d...\n",
		       _rt_dif.interface);
		if (usb_claim_interface(_rt_dif.dev_handle, _rt_dif.interface) < 0) {
			fprintf(stderr, "Cannot claim interface: %s\n",
				usb_strerror());
			exit(1);
		}

		/* DFU 1.0, Table 4.1: in runtime mode the alternate
		   interface setting must be zero, so '0' is correct here.

		   We call usb_set_altinterface() because it is the only
		   libusb API that issues the SET_INTERFACE standard
		   request; usb_claim_interface() above selects the
		   interface but does not actually switch the device to it.
		*/
		if (usb_set_altinterface(_rt_dif.dev_handle, 0) < 0) {
			fprintf(stderr, "Cannot set alternate interface %d: %s\n",
				0,
				usb_strerror());
			exit(1);
		}

		printf("Determining device state: ");
		int state = -1;
		if ( (state = dfu_get_state(&handle)) < 0) {
			exit(1);
		}
		printf("state = %s\n", dfu_state_to_string(state));

		dfu_sm_set_state_unchecked(&handle, state);

		printf("Determining device status: ");
		if (dfu_get_status(&handle, &status ) < 0) {
			exit(1);
		}

		printf("state = %s, status = %d = \"%s\"\n",
		       dfu_state_to_string(status.bState),
		       status.bStatus,
		       dfu_status_to_string(status.bStatus) );

		switch (status.bState) {
		case DFU_STATE_appIDLE:
		case DFU_STATE_appDETACH:
			printf("Device really in Runtime Mode, send DFU "
			       "detach request...\n");

			if(status.bState == DFU_STATE_appDETACH) {
				printf("Device is already in state %s, skipping DFU_DETACH request\n",
				       dfu_state_to_string(status.bState));
			}
			else
			{
				if (dfu_detach(&handle, 1000) < 0) {
					exit(1);
					break;
				}
			}

			/* handle bitWillDetach (DFU 1.1) */
			if(handle.dfu_ver == DFU_VERSION_1_1 &&
			   handle.func_dfu.bmAttributes & USB_DFU_WILL_DETACH)
			{
				/* TODO: test this with a real DFU 1.1 device */
				printf("Waiting for USB device's own detach (bitWillDetach=1)...\n");
				dfu_sm_set_state_checked(&handle,
							 DFU_STATE_dfuIDLE);
			}
			else
			{
				printf("Resetting USB...\n");
				ret = dfu_usb_reset(&handle);
				if (ret < 0 && ret != -ENODEV)
				{
					/* do nothing; error msg is output in dfu_usb_reset. */
				}
			}

			sleep(2);
			break;
		case DFU_STATE_dfuERROR:
			printf("dfuERROR, clearing status\n");
			if (dfu_clear_status(&handle) < 0) {
				exit(1);
				break;
			}
			break;
		default:
			fprintf(stderr, "WARNING: Runtime device already "
				"in DFU state ?!?\n");
			goto dfustate;
			break;
		}

		/* now we need to re-scan the bus and locate our device */
		if (usb_find_devices() < 2)
			printf("not at least 2 device changes found ?!?\n");

		if (dif->flags & DFU_IFF_PATH) {
			ret = resolve_device_path(dif);
			if (ret < 0) {
				fprintf(stderr,
				    "internal error: cannot re-parse `%s'\n",
				    dif->path);
				abort();
			}
			if (!ret) {
				fprintf(stderr,
				    "Can't resolve path after RESET?\n");
				exit(1);
			}
		}

		num_devs = count_dfu_devices(dif);
		if (num_devs == 0) {
			fprintf(stderr, "Lost device after RESET?\n");
			exit(1);
		} else if (num_devs > 1) {
			fprintf(stderr, "More than one DFU capable USB "
				"device found, you might try `--list' and "
				"then disconnect all but one device\n");
			exit(1);
		}
		if (!get_first_dfu_device(dif))
			exit(3);

		printf("Opening USB Device...\n");
		dif->dev_handle = usb_open(dif->dev);
		if (!dif->dev_handle) {
			fprintf(stderr, "Cannot open device: %s\n",
				usb_strerror());
			exit(1);
		}
	} else {
		/* we're already in DFU mode, so we can skip the detach/reset
		 * procedure */
	}

 dfustate:
	if (alt_name) {
		int n;

		n = find_dfu_if(dif->dev, &alt_by_name, alt_name);
		if (!n) {
			fprintf(stderr, "No such Alternate Setting: \"%s\"\n",
			    alt_name);
			exit(1);
		}
		if (n < 0) {
			fprintf(stderr, "Error %d in name lookup\n", n);
			exit(1);
		}
		dif->altsetting = n-1;
	}

	print_dfu_if(dif, NULL);

	num_ifs = count_dfu_interfaces(dif->dev);
	if (num_ifs < 0) {
		fprintf(stderr, "No DFU Interface after RESET?!?\n");
		exit(1);
	} else if (num_ifs == 1) {
		if (!get_first_dfu_if(dif)) {
			fprintf(stderr, "Can't find the single available "
				"DFU IF\n");
			exit(1);
		}
	} else if (num_ifs > 1 && !(dif->flags & (DFU_IFF_IFACE|DFU_IFF_ALT))) {
		fprintf(stderr, "We have %u DFU Interfaces/Altsettings, "
			"you have to specify one via --intf / --alt options\n",
			num_ifs);
		exit(1);
	}

#if 0
	printf("Setting Configuration %u...\n", dif->configuration);
	if (usb_set_configuration(dif->dev_handle, dif->configuration) < 0) {
		fprintf(stderr, "Cannot set configuration: %s\n",
			usb_strerror());
		exit(1);
	}
#endif
	printf("Claiming USB DFU Interface...\n");
	if (usb_claim_interface(dif->dev_handle, dif->interface) < 0) {
		fprintf(stderr, "Cannot claim interface: %s\n",
			usb_strerror());
		exit(1);
	}

	printf("Setting Alternate Setting ...\n");
	if (usb_set_altinterface(dif->dev_handle, dif->altsetting) < 0) {
		fprintf(stderr, "Cannot set alternate interface: %s\n",
			usb_strerror());
		exit(1);
	}

	/* update the handle to point to the dfu-mode descriptor */
	handle.device = dif->dev_handle;
	handle.interface = dif->interface;

 status_again:
	printf("Determining device status: ");
	if (dfu_get_status(&handle, &status ) < 0) {
		fprintf(stderr, "error get_status: %s\n", usb_strerror());
		exit(1);
	}
	printf("state = %s, status = %d\n",
	       dfu_state_to_string(status.bState), status.bStatus);

	/* force the statemachine into current status */
	dfu_sm_set_state_unchecked(&handle, status.bState);

	switch (status.bState) {
	case DFU_STATE_appIDLE:
	case DFU_STATE_appDETACH:
		fprintf(stderr, "Device still in Runtime Mode!\n");
		exit(1);
		break;
	case DFU_STATE_dfuERROR:
		printf("dfuERROR, clearing status\n");
		if (dfu_clear_status(&handle) < 0) {
			fprintf(stderr, "error clear_status: %s\n",
				usb_strerror());
			exit(1);
		}
		goto status_again;
		break;
	case DFU_STATE_dfuDNLOAD_IDLE:
	case DFU_STATE_dfuUPLOAD_IDLE:
		printf("aborting previous incomplete transfer\n");
		if (dfu_abort(&handle) < 0) {
			fprintf(stderr, "can't send DFU_ABORT: %s\n",
				usb_strerror());
			exit(1);
		}
		goto status_again;
		break;
	case DFU_STATE_dfuIDLE:
		printf("dfuIDLE, continuing\n");
		break;
	}

	/* Obtain DFU functional descriptor */
	ret = usb_get_descriptor(dif->dev_handle, 0x21, dif->interface,
				 &(handle.func_dfu), sizeof(handle.func_dfu));
	if (ret < 0) {
		fprintf(stderr, "Error obtaining DFU functional "
			"descriptor: %s\n", usb_strerror());

		if(dfu_quirk_is_set(&handle.quirk_flags,
				    QUIRK_IGNORE_INVALID_FUNCTIONAL_DESCRIPTOR))
		{
			handle.func_dfu.bmAttributes =
				USB_DFU_CAN_DOWNLOAD |
				USB_DFU_CAN_UPLOAD |
				USB_DFU_MANIFEST_TOL;

			handle.func_dfu.wTransferSize = cpu_to_le16(transfer_size);
			if(!transfer_size)
				transfer_size = page_size;

			handle.func_dfu.bcdDFUVersion = USB_DFU_VER_1_0;

			fprintf(stderr, "   Still, try to continue with default flags/manual settings.\n");
		}
		else
		{
			exit(1);
		}
	}
	else
	{
		transfer_size = le16_to_cpu(handle.func_dfu.wTransferSize);
	}

	/* why is this limited to page_size, a host-dependent value? (sgiessl) */
	if (transfer_size > page_size)
		transfer_size = page_size;

	/* quirk overwriting DFU version */
	if(dfu_quirk_is_set(&handle.quirk_flags, QUIRK_FORCE_DFU_VERSION_1_0))
	{
		handle.func_dfu.bcdDFUVersion = USB_DFU_VER_1_0;
	}
	else if(dfu_quirk_is_set(&handle.quirk_flags, QUIRK_FORCE_DFU_VERSION_1_1))
	{
		handle.func_dfu.bcdDFUVersion = USB_DFU_VER_1_1;
	}

	/* read DFU version */
	switch(handle.func_dfu.bcdDFUVersion)
	{
	case USB_DFU_VER_1_1:
		handle.dfu_ver = DFU_VERSION_1_1;
		break;

	default:
		printf("WARNING: device specifies unknown DFU version 0x%.2x, defaulting to DFU 1.0\n",
		       handle.func_dfu.bcdDFUVersion);
		/* fall through intended */
	case USB_DFU_VER_1_0:
		handle.dfu_ver = DFU_VERSION_1_0;
		break;
	}

	printf("Transfer Size = 0x%04x\n", transfer_size);

	printf("Device functional descriptor: %s\n",
	       dfu_func_descriptor_to_string(&handle.func_dfu));

	if (DFU_STATUS_OK != status.bStatus ) {
		printf("WARNING: DFU Status: '%s'\n",
			dfu_status_to_string(status.bStatus));
		/* Clear our status & try again. */
		dfu_clear_status(&handle);
		dfu_get_status(&handle, &status);

		if (DFU_STATUS_OK != status.bStatus) {
			fprintf(stderr, "Error: %d\n", status.bStatus);
			exit(1);
		}
	}

	switch (mode) {
	case MODE_UPLOAD:
		if (sam7dfu_do_upload(&handle,
				  transfer_size, filename) < 0)
			exit(1);
		break;
	case MODE_DOWNLOAD:
		if (sam7dfu_do_dnload(&handle,
				  transfer_size, filename) < 0)
			exit(1);
		break;
	default:
		fprintf(stderr, "Unsupported mode: %u\n", mode);
		exit(1);
	}

	if (final_reset) {
		if(dfu_quirk_is_set(&handle.quirk_flags, QUIRK_OPENMOKO_DETACH_BEFORE_FINAL_RESET))
		{
			/* DFU_DETACH is only allowed in appIDLE, so
			   this is non-standard (as of DFU 1.0, and
			   1.1). */
			printf("Initiating reset by sending DFU_DETACH (QUIRK_OPENMOKO_DETACH_BEFORE_FINAL_RESET)\n");
			if (dfu_detach(&handle, 1000) < 0) {
				fprintf(stderr, "can't detach: %s\n", usb_strerror());
			}
		}
		printf("Resetting USB to switch back to runtime mode\n");
		ret = usb_reset(dif->dev_handle);
		if (ret < 0 && ret != -ENODEV) {
			fprintf(stderr, "error resetting after download: %s\n", 
			usb_strerror());
		}
	}

	exit(0);
}
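
One byte-order detail in main() above is easy to miss: wTransferSize arrives little-endian in the DFU functional descriptor and must pass through le16_to_cpu() before it is compared with host values such as page_size. A condensed sketch of that clamp logic, with an illustrative function name:

#include <stdint.h>

/* Derive a usable transfer size from a little-endian wTransferSize. */
static unsigned int clamp_transfer_size(uint16_t wTransferSize_le,
					unsigned int page_size)
{
	unsigned int size = le16_to_cpu(wTransferSize_le);

	if (!size)		/* descriptor left it unset */
		size = page_size;
	if (size > page_size)	/* dfu-util limits transfers to one page */
		size = page_size;
	return size;
}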
Example #21
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}
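
Note how b43_dma_tx() sets the more-data bit: it converts the mask, not the field. Because frame_control is stored little-endian, OR-ing in a cpu_to_le16()-converted constant is endian-safe and needs no read-convert-modify cycle. A minimal sketch of the idiom:

static void mark_more_data(struct ieee80211_hdr *hdr)
{
	/* LE field |= LE mask stays LE; no byte swap of the field needed */
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}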
Example #22
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}
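
nilfs_write_inode_common() above shows the serialization direction: every in-memory field is converted with cpu_to_le16/32/64 on its way into the on-disk buffer, and readers apply the matching le*_to_cpu. A reduced sketch of that round trip; the raw_dirent layout is hypothetical, not a nilfs structure.

struct raw_dirent {		/* hypothetical on-disk record */
	__le64 ino;
	__le16 rec_len;
	__le16 name_len;
};

static void dirent_to_disk(struct raw_dirent *raw, u64 ino,
			   u16 rec_len, u16 name_len)
{
	raw->ino = cpu_to_le64(ino);		/* CPU -> on-disk LE */
	raw->rec_len = cpu_to_le16(rec_len);
	raw->name_len = cpu_to_le16(name_len);
}

static u16 dirent_rec_len(const struct raw_dirent *raw)
{
	return le16_to_cpu(raw->rec_len);	/* on-disk LE -> CPU */
}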
Example #23
/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16  buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	struct iavf_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	iavf_status status = 0;
	u16  retval = 0;
	u32  val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}
Example #24
File: mmp.c Project: AiWinters/linux
/*
 * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
 */
static int kmmpd(void *data)
{
	struct super_block *sb = ((struct mmpd_data *) data)->sb;
	struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct mmp_struct *mmp;
	ext4_fsblk_t mmp_block;
	u32 seq = 0;
	unsigned long failed_writes = 0;
	int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
	unsigned mmp_check_interval;
	unsigned long last_update_time;
	unsigned long diff;
	int retval;

	mmp_block = le64_to_cpu(es->s_mmp_block);
	mmp = (struct mmp_struct *)(bh->b_data);
	mmp->mmp_time = cpu_to_le64(get_seconds());
	/*
	 * Start with the higher mmp_check_interval and reduce it if
	 * the MMP block is being updated on time.
	 */
	mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
				 EXT4_MMP_MIN_CHECK_INTERVAL);
	mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
	bdevname(bh->b_bdev, mmp->mmp_bdevname);

	memcpy(mmp->mmp_nodename, init_utsname()->nodename,
	       sizeof(mmp->mmp_nodename));

	while (!kthread_should_stop()) {
		if (++seq > EXT4_MMP_SEQ_MAX)
			seq = 1;

		mmp->mmp_seq = cpu_to_le32(seq);
		mmp->mmp_time = cpu_to_le64(get_seconds());
		last_update_time = jiffies;

		retval = write_mmp_block(sb, bh);
		/*
		 * Don't spew too many error messages. Print one every
		 * (s_mmp_update_interval * 60) seconds.
		 */
		if (retval) {
			if ((failed_writes % 60) == 0)
				ext4_error(sb, "Error writing to MMP block");
			failed_writes++;
		}

		if (!(le32_to_cpu(es->s_feature_incompat) &
		    EXT4_FEATURE_INCOMPAT_MMP)) {
			ext4_warning(sb, "kmmpd being stopped since MMP feature"
				     " has been disabled.");
			EXT4_SB(sb)->s_mmp_tsk = NULL;
			goto failed;
		}

		if (sb->s_flags & MS_RDONLY) {
			ext4_warning(sb, "kmmpd being stopped since filesystem "
				     "has been remounted as readonly.");
			EXT4_SB(sb)->s_mmp_tsk = NULL;
			goto failed;
		}

		diff = jiffies - last_update_time;
		if (diff < mmp_update_interval * HZ)
			schedule_timeout_interruptible(mmp_update_interval *
						       HZ - diff);

		/*
		 * We need to make sure that more than mmp_check_interval
		 * seconds have not passed since writing. If that has happened
		 * we need to check if the MMP block is as we left it.
		 */
		diff = jiffies - last_update_time;
		if (diff > mmp_check_interval * HZ) {
			struct buffer_head *bh_check = NULL;
			struct mmp_struct *mmp_check;

			retval = read_mmp_block(sb, &bh_check, mmp_block);
			if (retval) {
				ext4_error(sb, "error reading MMP data: %d",
					   retval);

				EXT4_SB(sb)->s_mmp_tsk = NULL;
				goto failed;
			}

			mmp_check = (struct mmp_struct *)(bh_check->b_data);
			if (mmp->mmp_seq != mmp_check->mmp_seq ||
			    memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
				   sizeof(mmp->mmp_nodename))) {
				dump_mmp_msg(sb, mmp_check,
					     "Error while updating MMP info. "
					     "The filesystem seems to have been"
					     " multiply mounted.");
				ext4_error(sb, "abort");
				goto failed;
			}
			put_bh(bh_check);
		}

		/*
		 * Adjust the mmp_check_interval depending on how much time
		 * it took for the MMP block to be written.
		 */
		mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
					     EXT4_MMP_MAX_CHECK_INTERVAL),
					 EXT4_MMP_MIN_CHECK_INTERVAL);
		mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
	}

	/*
	 * Unmount seems to be clean.
	 */
	mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
	mmp->mmp_time = cpu_to_le64(get_seconds());

	retval = write_mmp_block(sb, bh);

failed:
	kfree(data);
	brelse(bh);
	return retval;
}
Example #25
	.channels = brcms_2ghz_chantable,
	.n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
	.bitrates = legacy_ratetable,
	.n_bitrates = ARRAY_SIZE(legacy_ratetable),
	.ht_cap = {
		   /* from include/linux/ieee80211.h */
		   .cap = IEEE80211_HT_CAP_GRN_FLD |
		   IEEE80211_HT_CAP_SGI_20 |
		   IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
		   .ht_supported = true,
		   .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
		   .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
		   .mcs = {
			   /* placeholders for now */
			   .rx_mask = {0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0},
			   .rx_highest = cpu_to_le16(500),
			   .tx_params = IEEE80211_HT_MCS_TX_DEFINED}
		   }
};

static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
	.band = IEEE80211_BAND_5GHZ,
	.channels = brcms_5ghz_nphy_chantable,
	.n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
	.bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET,
	.n_bitrates = ARRAY_SIZE(legacy_ratetable) -
			BRCMS_LEGACY_5G_RATE_OFFSET,
	.ht_cap = {
		   .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
			  IEEE80211_HT_CAP_SGI_40 |
			  IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
Example #26
					IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
					IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
					IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
				.phy_cap_info[9] =
					IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
					IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
					IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
					IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
			},
			/*
			 * Set default Tx/Rx HE MCS NSS Support field.
			 * Indicate support for up to 2 spatial streams and all
			 * MCS, without any special cases
			 */
			.he_mcs_nss_supp = {
				.rx_mcs_80 = cpu_to_le16(0xfffa),
				.tx_mcs_80 = cpu_to_le16(0xfffa),
				.rx_mcs_160 = cpu_to_le16(0xfffa),
				.tx_mcs_160 = cpu_to_le16(0xfffa),
				.rx_mcs_80p80 = cpu_to_le16(0xffff),
				.tx_mcs_80p80 = cpu_to_le16(0xffff),
			},
			/*
			 * Set default PPE thresholds, with PPET16 set to 0,
			 * PPET8 set to 7
			 */
			.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
		},
	},
	{
		.types_mask = BIT(NL80211_IFTYPE_AP),
Example #27
static int nilfs_remount(struct super_block *sb, int *flags, char *data)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_super_block *sbp;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	unsigned long old_sb_flags;
	struct nilfs_mount_options old_opts;
	int was_snapshot, err;

	lock_kernel();

	down_write(&nilfs->ns_super_sem);
	old_sb_flags = sb->s_flags;
	old_opts.mount_opt = sbi->s_mount_opt;
	old_opts.snapshot_cno = sbi->s_snapshot_cno;
	was_snapshot = nilfs_test_opt(sbi, SNAPSHOT);

	if (!parse_options(data, sb)) {
		err = -EINVAL;
		goto restore_opts;
	}
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL);

	err = -EINVAL;
	if (was_snapshot) {
		if (!(*flags & MS_RDONLY)) {
			printk(KERN_ERR "NILFS (device %s): cannot remount "
			       "snapshot read/write.\n",
			       sb->s_id);
			goto restore_opts;
		} else if (sbi->s_snapshot_cno != old_opts.snapshot_cno) {
			printk(KERN_ERR "NILFS (device %s): cannot "
			       "remount to a different snapshot.\n",
			       sb->s_id);
			goto restore_opts;
		}
	} else {
		if (nilfs_test_opt(sbi, SNAPSHOT)) {
			printk(KERN_ERR "NILFS (device %s): cannot change "
			       "a regular mount to a snapshot.\n",
			       sb->s_id);
			goto restore_opts;
		}
	}

	if (!nilfs_valid_fs(nilfs)) {
		printk(KERN_WARNING "NILFS (device %s): couldn't "
		       "remount because the filesystem is in an "
		       "incomplete recovery state.\n", sb->s_id);
		goto restore_opts;
	}

	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		goto out;
	if (*flags & MS_RDONLY) {
		/* Shutting down the segment constructor */
		nilfs_detach_segment_constructor(sbi);
		sb->s_flags |= MS_RDONLY;

		/*
		 * Remounting a valid RW partition RDONLY, so set
		 * the RDONLY flag and then mark the partition as valid again.
		 */
		down_write(&nilfs->ns_sem);
		sbp = nilfs->ns_sbp[0];
		if (!(sbp->s_state & le16_to_cpu(NILFS_VALID_FS)) &&
		    (nilfs->ns_mount_state & NILFS_VALID_FS))
			sbp->s_state = cpu_to_le16(nilfs->ns_mount_state);
		sbp->s_mtime = cpu_to_le64(get_seconds());
		nilfs_commit_super(sbi, 1);
		up_write(&nilfs->ns_sem);
	} else {
		/*
		 * Mounting a RDONLY partition read-write, so reread and
		 * store the current valid flag.  (It may have been changed
		 * by fsck since we originally mounted the partition.)
		 */
		sb->s_flags &= ~MS_RDONLY;

		err = nilfs_attach_segment_constructor(sbi);
		if (err)
			goto restore_opts;

		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sbi);
		up_write(&nilfs->ns_sem);
	}
 out:
	up_write(&nilfs->ns_super_sem);
	unlock_kernel();
	return 0;

 restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.mount_opt;
	sbi->s_snapshot_cno = old_opts.snapshot_cno;
	up_write(&nilfs->ns_super_sem);
	unlock_kernel();
	return err;
}
Example #28
static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
				  struct iwl_nvm_data *data,
				  struct ieee80211_sta_vht_cap *vht_cap,
				  u8 tx_chains, u8 rx_chains)
{
	int num_rx_ants = num_of_ant(rx_chains);
	int num_tx_ants = num_of_ant(tx_chains);
	unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
					   IEEE80211_VHT_MAX_AMPDU_1024K);

	vht_cap->vht_supported = true;

	vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
		       IEEE80211_VHT_CAP_RXSTBC_1 |
		       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
		       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
		       max_ampdu_exponent <<
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	if (data->vht160_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
				IEEE80211_VHT_CAP_SHORT_GI_160;

	if (cfg->vht_mu_mimo_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

	if (cfg->ht_params->ldpc)
		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;

	if (data->sku_cap_mimo_disabled) {
		num_rx_ants = 1;
		num_tx_ants = 1;
	}

	if (num_tx_ants > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		if (cfg->mq_rx_supported)
			vht_cap->cap |=
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		else
			vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_2K:
		if (cfg->mq_rx_supported)
			vht_cap->cap |=
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		else
			WARN(1, "RB size of 2K is not supported by this device\n");
		break;
	case IWL_AMSDU_4K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_8K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
		break;
	case IWL_AMSDU_12K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		break;
	default:
		break;
	}

	vht_cap->vht_mcs.rx_mcs_map =
		cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
			    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);

	if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
		vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
		/* this works because NOT_SUPPORTED == 3 */
		vht_cap->vht_mcs.rx_mcs_map |=
			cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
	}

	vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;

	vht_cap->vht_mcs.tx_highest |=
		cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
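
The MCS map above is assembled in host order first and converted once at the end, which is both cheaper and easier to read than swapping each 2-bit field separately. A sketch of the same packing under that convention; the helper name is illustrative.

/* Pack per-stream VHT MCS support (2 bits per stream) into a LE map.
 * support[] holds IEEE80211_VHT_MCS_* values for streams 0..7. */
static __le16 pack_vht_mcs_map(const u8 support[8])
{
	u16 map = 0;
	int i;

	for (i = 0; i < 8; i++)
		map |= (u16)(support[i] & 0x3) << (i * 2);

	return cpu_to_le16(map);	/* single conversion at the end */
}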
Example #29
File: sb.c Project: 119-org/hi3518-osdrv
/**
 * create_default_filesystem - format empty UBI volume.
 * @c: UBIFS file-system description object
 *
 * This function creates default empty file-system. Returns zero in case of
 * success and a negative error code in case of failure.
 */
static int create_default_filesystem(struct ubifs_info *c)
{
	struct ubifs_sb_node *sup;
	struct ubifs_mst_node *mst;
	struct ubifs_idx_node *idx;
	struct ubifs_branch *br;
	struct ubifs_ino_node *ino;
	struct ubifs_cs_node *cs;
	union ubifs_key key;
	int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first;
	int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
	int min_leb_cnt = UBIFS_MIN_LEB_CNT;
	long long tmp64, main_bytes;
	__le64 tmp_le64;

	/* Some functions called from here depend on the @c->key_len field */
	c->key_len = UBIFS_SK_LEN;

	/*
	 * First of all, we have to calculate default file-system geometry -
	 * log size, journal size, etc.
	 */
	if (c->leb_cnt < 0x7FFFFFFF / DEFAULT_JNL_PERCENT)
		/* We can first multiply then divide and have no overflow */
		jnl_lebs = c->leb_cnt * DEFAULT_JNL_PERCENT / 100;
	else
		jnl_lebs = (c->leb_cnt / 100) * DEFAULT_JNL_PERCENT;

	if (jnl_lebs < UBIFS_MIN_JNL_LEBS)
		jnl_lebs = UBIFS_MIN_JNL_LEBS;
	if (jnl_lebs * c->leb_size > DEFAULT_MAX_JNL)
		jnl_lebs = DEFAULT_MAX_JNL / c->leb_size;

	/*
	 * The log should be large enough to fit reference nodes for all bud
	 * LEBs. Because buds do not have to start from the beginning of LEBs
	 * (half of the LEB may contain committed data), the log should
	 * generally be larger, make it twice as large.
	 */
	tmp = 2 * (c->ref_node_alsz * jnl_lebs) + c->leb_size - 1;
	log_lebs = tmp / c->leb_size;
	/* Plus one LEB reserved for commit */
	log_lebs += 1;
	if (c->leb_cnt - min_leb_cnt > 8) {
		/* And some extra space to allow writes while committing */
		log_lebs += 1;
		min_leb_cnt += 1;
	}

	max_buds = jnl_lebs - log_lebs;
	if (max_buds < UBIFS_MIN_BUD_LEBS)
		max_buds = UBIFS_MIN_BUD_LEBS;

	/*
	 * Orphan nodes are stored in a separate area. One node can store a lot
	 * of orphan inode numbers, but when new orphan comes we just add a new
	 * orphan node. At some point the nodes are consolidated into one
	 * orphan node.
	 */
	orph_lebs = UBIFS_MIN_ORPH_LEBS;
#ifdef CONFIG_UBIFS_FS_DEBUG
	if (c->leb_cnt - min_leb_cnt > 1)
		/*
		 * For debugging purposes it is better to have at least 2
		 * orphan LEBs, because the orphan subsystem would need to do
		 * consolidations and would be stressed more.
		 */
		orph_lebs += 1;
#endif

	main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - log_lebs;
	main_lebs -= orph_lebs;

	lpt_first = UBIFS_LOG_LNUM + log_lebs;
	c->lsave_cnt = DEFAULT_LSAVE_CNT;
	c->max_leb_cnt = c->leb_cnt;
	err = ubifs_create_dflt_lpt(c, &main_lebs, lpt_first, &lpt_lebs,
				    &big_lpt);
	if (err)
		return err;

	dbg_gen("LEB Properties Tree created (LEBs %d-%d)", lpt_first,
		lpt_first + lpt_lebs - 1);

	main_first = c->leb_cnt - main_lebs;

	/* Create default superblock */
	tmp = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
	sup = kzalloc(tmp, GFP_KERNEL);
	if (!sup)
		return -ENOMEM;

	tmp64 = (long long)max_buds * c->leb_size;
	if (big_lpt)
		sup_flags |= UBIFS_FLG_BIGLPT;

	sup->ch.node_type  = UBIFS_SB_NODE;
	sup->key_hash      = UBIFS_KEY_HASH_R5;
	sup->flags         = cpu_to_le32(sup_flags);
	sup->min_io_size   = cpu_to_le32(c->min_io_size);
	sup->leb_size      = cpu_to_le32(c->leb_size);
	sup->leb_cnt       = cpu_to_le32(c->leb_cnt);
	sup->max_leb_cnt   = cpu_to_le32(c->max_leb_cnt);
	sup->max_bud_bytes = cpu_to_le64(tmp64);
	sup->log_lebs      = cpu_to_le32(log_lebs);
	sup->lpt_lebs      = cpu_to_le32(lpt_lebs);
	sup->orph_lebs     = cpu_to_le32(orph_lebs);
	sup->jhead_cnt     = cpu_to_le32(DEFAULT_JHEADS_CNT);
	sup->fanout        = cpu_to_le32(DEFAULT_FANOUT);
	sup->lsave_cnt     = cpu_to_le32(c->lsave_cnt);
	sup->fmt_version   = cpu_to_le32(UBIFS_FORMAT_VERSION);
	sup->time_gran     = cpu_to_le32(DEFAULT_TIME_GRAN);
	if (c->mount_opts.override_compr)
		sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
	else
		sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);

	generate_random_uuid(sup->uuid);

	main_bytes = (long long)main_lebs * c->leb_size;
	tmp64 = div_u64(main_bytes * DEFAULT_RP_PERCENT, 100);
	if (tmp64 > DEFAULT_MAX_RP_SIZE)
		tmp64 = DEFAULT_MAX_RP_SIZE;
	sup->rp_size = cpu_to_le64(tmp64);
	sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION);

	err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0, UBI_LONGTERM);
	kfree(sup);
	if (err)
		return err;

	dbg_gen("default superblock created at LEB 0:0");

	/* Create default master node */
	mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
	if (!mst)
		return -ENOMEM;

	mst->ch.node_type = UBIFS_MST_NODE;
	mst->log_lnum     = cpu_to_le32(UBIFS_LOG_LNUM);
	mst->highest_inum = cpu_to_le64(UBIFS_FIRST_INO);
	mst->cmt_no       = 0;
	mst->root_lnum    = cpu_to_le32(main_first + DEFAULT_IDX_LEB);
	mst->root_offs    = 0;
	tmp = ubifs_idx_node_sz(c, 1);
	mst->root_len     = cpu_to_le32(tmp);
	mst->gc_lnum      = cpu_to_le32(main_first + DEFAULT_GC_LEB);
	mst->ihead_lnum   = cpu_to_le32(main_first + DEFAULT_IDX_LEB);
	mst->ihead_offs   = cpu_to_le32(ALIGN(tmp, c->min_io_size));
	mst->index_size   = cpu_to_le64(ALIGN(tmp, 8));
	mst->lpt_lnum     = cpu_to_le32(c->lpt_lnum);
	mst->lpt_offs     = cpu_to_le32(c->lpt_offs);
	mst->nhead_lnum   = cpu_to_le32(c->nhead_lnum);
	mst->nhead_offs   = cpu_to_le32(c->nhead_offs);
	mst->ltab_lnum    = cpu_to_le32(c->ltab_lnum);
	mst->ltab_offs    = cpu_to_le32(c->ltab_offs);
	mst->lsave_lnum   = cpu_to_le32(c->lsave_lnum);
	mst->lsave_offs   = cpu_to_le32(c->lsave_offs);
	mst->lscan_lnum   = cpu_to_le32(main_first);
	mst->empty_lebs   = cpu_to_le32(main_lebs - 2);
	mst->idx_lebs     = cpu_to_le32(1);
	mst->leb_cnt      = cpu_to_le32(c->leb_cnt);

	/* Calculate lprops statistics */
	tmp64 = main_bytes;
	tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size);
	tmp64 -= ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size);
	mst->total_free = cpu_to_le64(tmp64);

	tmp64 = ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size);
	ino_waste = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size) -
			  UBIFS_INO_NODE_SZ;
	tmp64 += ino_waste;
	tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), 8);
	mst->total_dirty = cpu_to_le64(tmp64);

	/*  The indexing LEB does not contribute to dark space */
	tmp64 = (c->main_lebs - 1) * c->dark_wm;
	mst->total_dark = cpu_to_le64(tmp64);

	mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ);

	err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0,
			       UBI_UNKNOWN);
	if (err) {
		kfree(mst);
		return err;
	}
	err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, 0,
			       UBI_UNKNOWN);
	kfree(mst);
	if (err)
		return err;

	dbg_gen("default master node created at LEB %d:0", UBIFS_MST_LNUM);

	/* Create the root indexing node */
	tmp = ubifs_idx_node_sz(c, 1);
	idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL);
	if (!idx)
		return -ENOMEM;

	c->key_fmt = UBIFS_SIMPLE_KEY_FMT;
	c->key_hash = key_r5_hash;

	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(1);
	ino_key_init(c, &key, UBIFS_ROOT_INO);
	br = ubifs_idx_branch(c, idx, 0);
	key_write_idx(c, &key, &br->key);
	br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB);
	br->len  = cpu_to_le32(UBIFS_INO_NODE_SZ);
	err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0,
			       UBI_UNKNOWN);
	kfree(idx);
	if (err)
		return err;

	dbg_gen("default root indexing node created LEB %d:0",
		main_first + DEFAULT_IDX_LEB);

	/* Create default root inode */
	tmp = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size);
	ino = kzalloc(tmp, GFP_KERNEL);
	if (!ino)
		return -ENOMEM;

	ino_key_init_flash(c, &ino->key, UBIFS_ROOT_INO);
	ino->ch.node_type = UBIFS_INO_NODE;
	ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
	ino->nlink = cpu_to_le32(2);
	tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
	ino->atime_sec   = tmp_le64;
	ino->ctime_sec   = tmp_le64;
	ino->mtime_sec   = tmp_le64;
	ino->atime_nsec  = 0;
	ino->ctime_nsec  = 0;
	ino->mtime_nsec  = 0;
	ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
	ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);

	/* Set compression enabled by default */
	ino->flags = cpu_to_le32(UBIFS_COMPR_FL);

	err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ,
			       main_first + DEFAULT_DATA_LEB, 0,
			       UBI_UNKNOWN);
	kfree(ino);
	if (err)
		return err;

	dbg_gen("root inode created at LEB %d:0",
		main_first + DEFAULT_DATA_LEB);

	/*
	 * The first node in the log has to be the commit start node. This is
	 * always the case during normal file-system operation. Write a fake
	 * commit start node to the log.
	 */
	tmp = ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size);
	cs = kzalloc(tmp, GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM,
			       0, UBI_UNKNOWN);
	kfree(cs);

	ubifs_msg("default file-system created");
	return 0;
}
Example #30
File: smp.c Project: Arunvasu/taoshan
static int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
{
	struct hci_conn *hcon = conn->hcon;
	struct smp_cmd_pairing *req, *rsp;
	__u8 *keydist;

	BT_DBG("conn %p force %d", conn, force);

	if (IS_ERR(hcon->hdev->tfm))
		return PTR_ERR(hcon->hdev->tfm);

	rsp = (void *) &hcon->prsp[1];

	/* The responder sends its keys first */
	if (!force && hcon->out && (rsp->resp_key_dist & 0x07))
		return 0;

	req = (void *) &hcon->preq[1];

	if (hcon->out) {
		keydist = &rsp->init_key_dist;
		*keydist &= req->init_key_dist;
	} else {
		keydist = &rsp->resp_key_dist;
		*keydist &= req->resp_key_dist;
	}

	BT_DBG("keydist 0x%x", *keydist);

	if (*keydist & SMP_DIST_ENC_KEY) {
		struct smp_cmd_encrypt_info enc;
		struct smp_cmd_master_ident ident;
		__le16 ediv;

		get_random_bytes(enc.ltk, sizeof(enc.ltk));
		get_random_bytes(&ediv, sizeof(ediv));
		get_random_bytes(ident.rand, sizeof(ident.rand));

		smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);

		hci_add_ltk(hcon->hdev, 1, conn->dst, hcon->dst_type,
				hcon->smp_key_size, hcon->auth, ediv,
				ident.rand, enc.ltk);

		ident.ediv = cpu_to_le16(ediv);

		smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);

		*keydist &= ~SMP_DIST_ENC_KEY;
	}

	if (*keydist & SMP_DIST_ID_KEY) {
		struct smp_cmd_ident_addr_info addrinfo;
		struct smp_cmd_ident_info idinfo;

		/* Send a dummy key */
		get_random_bytes(idinfo.irk, sizeof(idinfo.irk));

		smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);

		/* Just public address */
		memset(&addrinfo, 0, sizeof(addrinfo));
		bacpy(&addrinfo.bdaddr, conn->src);

		smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
								&addrinfo);

		*keydist &= ~SMP_DIST_ID_KEY;
	}

	if (*keydist & SMP_DIST_SIGN) {
		struct smp_cmd_sign_info sign;

		/* Send a dummy key */
		get_random_bytes(sign.csrk, sizeof(sign.csrk));

		smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);

		*keydist &= ~SMP_DIST_SIGN;
	}

	if (hcon->out) {
		if (hcon->disconn_cfm_cb)
			hcon->disconn_cfm_cb(hcon, 0);
		del_timer(&hcon->smp_timer);
		clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
		hci_conn_put(hcon);
	} else if (rsp->resp_key_dist) {
		if (hcon->disconn_cfm_cb)
			hcon->disconn_cfm_cb(hcon, SMP_UNSPECIFIED);
		clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
		mgmt_auth_failed(hcon->hdev->id, conn->dst, SMP_UNSPECIFIED);
		hci_conn_put(hcon);
	}

	return 0;
}
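
A subtlety in smp_distribute_keys() above: ediv is declared __le16 but filled with random bytes and then passed through cpu_to_le16() again, a double conversion that is harmless only because the value is random. The conventional pattern keeps the host-order value in a u16 and converts exactly once at the protocol boundary, as in this sketch:

static void fill_master_ident(struct smp_cmd_master_ident *ident)
{
	u16 ediv;

	get_random_bytes(&ediv, sizeof(ediv));	/* host-order scratch */
	get_random_bytes(ident->rand, sizeof(ident->rand));
	ident->ediv = cpu_to_le16(ediv);	/* convert once, on the wire */
}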