Example #1
0
void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	int cmdlen, ret;
	struct iwm_tx_queue *txq;
	int pool_id;

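	/* The work item is embedded in a tx queue; walk back to the queue and
	 * then to the iwm_priv that owns the txq[] array. */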
	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

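	/* Drain the queue as long as frames are pending and this credit pool
	 * has not been marked full. */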
	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		skb = skb_dequeue(&txq->queue);
		tx_info = skb_to_tx_info(skb);
		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

#if !CONFIG_IWM_TX_CONCATENATED
		/* Non-concatenated path: send each frame to the device on its own. */
		ret = iwm_send_packet(iwm, skb, pool_id);
#else

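		/* Flush the concatenation buffer first if this frame would not fit. */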
		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			skb_queue_head(&txq->queue, skb);
			break;
		}

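		/* Append the frame to the concatenation buffer, 16-byte aligned. */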
		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);
#endif
		kfree_skb(skb);
	}

	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}
void iwm_if_free(struct iwm_priv *iwm)
{
	if (!iwm_to_ndev(iwm))
		return;

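	/* Make sure the delayed CT-kill work is gone before freeing the netdev. */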
	cancel_delayed_work_sync(&iwm->ct_kill_delay);
	free_netdev(iwm_to_ndev(iwm));
	iwm_priv_deinit(iwm);
	kfree(iwm->umac_profile);
	iwm->umac_profile = NULL;
	iwm_wdev_free(iwm);
}
int iwm_if_add(struct iwm_priv *iwm)
{
	struct net_device *ndev = iwm_to_ndev(iwm);
	int ret;

	ret = register_netdev(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
		return ret;
	}

	return 0;
}
void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	struct iwm_tid_info *tid_info;
	int cmdlen, ret, pool_id;

	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		spin_lock_bh(&txq->lock);
		skb = skb_dequeue(&txq->queue);
		spin_unlock_bh(&txq->lock);

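		/* Drop frames addressed to stations no longer in the table. */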
		tx_info = skb_to_tx_info(skb);
		sta_info = &iwm->sta_table[tx_info->sta];
		if (!sta_info->valid) {
			IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
			kfree_skb(skb);
			continue;
		}

		tid_info = &sta_info->tid_info[tx_info->tid];

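		/* Serialize with TID state changes; a stopped TID parks the
		 * frame on stopped_queue instead of sending it. */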
		mutex_lock(&tid_info->mutex);

		if (tid_info->stopped) {
			IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
				   tx_info->sta, tx_info->tid);
			spin_lock_bh(&txq->lock);
			skb_queue_tail(&txq->stopped_queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			continue;
		}

		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

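		/* Flush the concatenation buffer if this frame would overflow it. */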
		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			spin_lock_bh(&txq->lock);
			skb_queue_head(&txq->queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			break;
		}

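		/* Build the frame into the concatenation buffer and remember the
		 * sequence number it was given. */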
		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		tid_info->last_seq_num =
			iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);

		mutex_unlock(&tid_info->mutex);

		kfree_skb(skb);
	}

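	/* Push out whatever is left in the concatenation buffer. */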
	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}
void iwm_if_remove(struct iwm_priv *iwm)
{
	unregister_netdev(iwm_to_ndev(iwm));
}
Example #6
0
int iwm_send_umac_config(struct iwm_priv *iwm,
			 __le32 reset_flags)
{
	int ret;

	/* Use UMAC default values */
	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_POWER_INDEX, iwm->conf.power_index);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
				      CFG_FRAG_THRESHOLD,
				      iwm->conf.frag_threshold);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_RTS_THRESHOLD,
				      iwm->conf.rts_threshold);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_CTS_TO_SELF, iwm->conf.cts_to_self);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_COEX_MODE, iwm->conf.coexist_mode);
	if (ret < 0)
		return ret;

	/*
	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_ASSOCIATION_TIMEOUT,
				      iwm->conf.assoc_timeout);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_ROAM_TIMEOUT,
				      iwm->conf.roam_timeout);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_WIRELESS_MODE,
				      WIRELESS_MODE_11A | WIRELESS_MODE_11G);
	if (ret < 0)
		return ret;
	*/

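	/* Program the interface MAC address into the UMAC. */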
	ret = iwm_umac_set_config_var(iwm, CFG_NET_ADDR,
				      iwm_to_ndev(iwm)->dev_addr, ETH_ALEN);
	if (ret < 0)
		return ret;

	/* UMAC PM static configurations */
	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_LEGACY_RX_TIMEOUT, 0x12C);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_LEGACY_TX_TIMEOUT, 0x15E);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_CTRL_FLAGS, 0x30001);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_KEEP_ALIVE_IN_BEACONS, 0x80);
	if (ret < 0)
		return ret;

	/* reset UMAC */
	ret = iwm_send_umac_reset(iwm, reset_flags, 1);
	if (ret < 0)
		return ret;

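	/* Wait for the UMAC to acknowledge the reset before returning. */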
	ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
			       WAIT_NOTIF_TIMEOUT);
	if (ret) {
		IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
		return ret;
	}

	return ret;
}
Example #7
0
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iwm_priv *iwm = ndev_to_iwm(netdev);
	struct net_device *ndev = iwm_to_ndev(iwm);
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	u8 *dst_addr;
	struct iwm_tx_info *tx_info;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	u8 sta_id;
	u16 queue;
	int ret;

	if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
			   "not associated\n");
		netif_tx_stop_all_queues(netdev);
		goto drop;
	}

	queue = skb_get_queue_mapping(skb);
	BUG_ON(queue >= IWM_TX_DATA_QUEUES);

	txq = &iwm->txq[queue];

	/* Too many frames backed up for this queue; stop the subqueue. */
	if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
		netif_stop_subqueue(netdev, queue);
		return NETDEV_TX_BUSY;
	}

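	/* Build the 802.11 header from the 802.3 frame, addressed to the BSS. */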
	ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
				       iwm->bssid, 0);
	if (ret) {
		IWM_ERR(iwm, "build wifi header failed\n");
		goto drop;
	}

	dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

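	/* Look up the destination station by MAC address. */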
	for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
		sta_info = &iwm->sta_table[sta_id];
		if (sta_info->valid &&
		    !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
			break;
	}

	if (sta_id == IWM_STA_TABLE_NUM) {
		IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
			dst_addr);
		goto drop;
	}

	tx_info = skb_to_tx_info(skb);
	tx_info->sta = sta_id;
	tx_info->color = sta_info->color;
	/* Non-QoS stations get the management TID instead of skb->priority. */
	if (sta_info->qos)
		tx_info->tid = skb->priority;
	else
		tx_info->tid = IWM_UMAC_MGMT_TID;

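	/* Queue the frame and kick this queue's Tx worker. */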
	skb_queue_tail(&iwm->txq[queue].queue, skb);

	queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	return NETDEV_TX_OK;

 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags)
{
	int ret;

	/* Use UMAC default values */
	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_POWER_INDEX, iwm->conf.power_index);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
				      CFG_FRAG_THRESHOLD,
				      iwm->conf.frag_threshold);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_RTS_THRESHOLD,
				      iwm->conf.rts_threshold);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_CTS_TO_SELF, iwm->conf.cts_to_self);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_WIRELESS_MODE,
				      iwm->conf.wireless_mode);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_COEX_MODE, modparam_wiwi);
	if (ret < 0)
		return ret;

	/*
	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_ASSOCIATION_TIMEOUT,
				      iwm->conf.assoc_timeout);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_ROAM_TIMEOUT,
				      iwm->conf.roam_timeout);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_WIRELESS_MODE,
				      WIRELESS_MODE_11A | WIRELESS_MODE_11G);
	if (ret < 0)
		return ret;
	*/

	ret = iwm_umac_set_config_var(iwm, CFG_NET_ADDR,
				      iwm_to_ndev(iwm)->dev_addr, ETH_ALEN);
	if (ret < 0)
		return ret;

	/* UMAC PM static configurations */
	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_LEGACY_RX_TIMEOUT, 0x12C);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_LEGACY_TX_TIMEOUT, 0x15E);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_CTRL_FLAGS, 0x1);
	if (ret < 0)
		return ret;

	ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
				      CFG_PM_KEEP_ALIVE_IN_BEACONS, 0x80);
	if (ret < 0)
		return ret;

	/* reset UMAC */
	ret = iwm_send_umac_reset(iwm, reset_flags, 1);
	if (ret < 0)
		return ret;

	ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
			       WAIT_NOTIF_TIMEOUT);
	if (ret) {
		IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
		return ret;
	}

	return ret;
}