/* Track per-TID tx packet rate for a station.
 *
 * The counter window is (re)opened on the first packet and reset once a
 * second has elapsed.  If the packet count within one window ever exceeds
 * the ampdu_min_traffic threshold, an AMPDU stream may be started for
 * this TID.
 */
static inline void mwl_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
{
	struct mwl_tx_info *stats;

	if (WARN_ON(tid >= SYSADPT_MAX_TID))
		return;

	stats = &mwl_dev_get_sta(sta)->tx_stats[tid];

	/* Open a fresh measurement window on the first counted packet. */
	if (stats->start_time == 0)
		stats->start_time = jiffies;

	if (jiffies - stats->start_time > HZ) {
		/* Window expired: restart measurement from scratch. */
		stats->pkts = 0;
		stats->start_time = 0;
	} else {
		stats->pkts++;
	}
}
static int mwl_mac80211_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mwl_priv *priv = hw->priv; struct mwl_vif *mwl_vif; struct mwl_sta *sta_info; struct ieee80211_key_conf *key; int rc; int i; mwl_vif = mwl_dev_get_vif(vif); sta_info = mwl_dev_get_sta(sta); memset(sta_info, 0, sizeof(*sta_info)); if (vif->type == NL80211_IFTYPE_MESH_POINT) { sta_info->is_mesh_node = true; /* Patch mesh interface for HT based on 88W8897. When authsae or * wpa_supplicant is used for mesh security, HT capbility wan't * be set. This would be removed if problem is fixed. */ sta->ht_cap.ht_supported = true; sta->ht_cap.cap = 0x6f; sta->ht_cap.mcs.rx_mask[0] = 0xff; sta->ht_cap.mcs.rx_mask[1] = 0xff; sta->ht_cap.ampdu_factor = 0x3; sta->ht_cap.ampdu_density = 0x5; } if (sta->ht_cap.ht_supported) { sta_info->is_ampdu_allowed = true; sta_info->is_amsdu_allowed = false; if (sta->ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU) sta_info->amsdu_ctrl.cap = MWL_AMSDU_SIZE_8K; else sta_info->amsdu_ctrl.cap = MWL_AMSDU_SIZE_4K; } sta_info->iv16 = 1; sta_info->iv32 = 0; spin_lock_init(&sta_info->amsdu_lock); spin_lock_bh(&priv->sta_lock); list_add_tail(&sta_info->list, &priv->sta_list); spin_unlock_bh(&priv->sta_lock); if (vif->type == NL80211_IFTYPE_STATION) mwl_fwcmd_set_new_stn_del(hw, vif, sta->addr); rc = mwl_fwcmd_set_new_stn_add(hw, vif, sta); for (i = 0; i < NUM_WEP_KEYS; i++) { key = (struct ieee80211_key_conf *)mwl_vif->wep_key_conf[i].key; if (mwl_vif->wep_key_conf[i].enabled) mwl_mac80211_set_key(hw, SET_KEY, vif, sta, key); } return rc; }
static int mwl_mac80211_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mwl_priv *priv = hw->priv; int rc; struct mwl_sta *sta_info = mwl_dev_get_sta(sta); mwl_tx_del_sta_amsdu_pkts(sta); spin_lock_bh(&priv->stream_lock); mwl_fwcmd_del_sta_streams(hw, sta); spin_unlock_bh(&priv->stream_lock); mwl_tx_del_pkts_via_sta(hw, sta); rc = mwl_fwcmd_set_new_stn_del(hw, vif, sta->addr); spin_lock_bh(&priv->sta_lock); list_del(&sta_info->list); spin_unlock_bh(&priv->sta_lock); return rc; }
static inline struct sk_buff *mwl_tx_do_amsdu(struct mwl_priv *priv, int desc_num, struct sk_buff *tx_skb, struct ieee80211_tx_info *tx_info) { struct ieee80211_sta *sta; struct mwl_sta *sta_info; struct mwl_tx_ctrl *tx_ctrl = (struct mwl_tx_ctrl *)&tx_info->status; struct ieee80211_tx_info *amsdu_info; struct sk_buff_head *amsdu_pkts; struct mwl_amsdu_frag *amsdu; int amsdu_allow_size; struct ieee80211_hdr *wh; int wh_len; u16 len; u8 *data; sta = (struct ieee80211_sta *)tx_ctrl->sta; sta_info = mwl_dev_get_sta(sta); if (!sta_info->is_amsdu_allowed) return tx_skb; wh = (struct ieee80211_hdr *)tx_skb->data; if (sta_info->is_mesh_node && is_multicast_ether_addr(wh->addr3)) return tx_skb; if (sta_info->amsdu_ctrl.cap == MWL_AMSDU_SIZE_4K) amsdu_allow_size = SYSADPT_AMSDU_4K_MAX_SIZE; else if (sta_info->amsdu_ctrl.cap == MWL_AMSDU_SIZE_8K) amsdu_allow_size = SYSADPT_AMSDU_8K_MAX_SIZE; else return tx_skb; spin_lock_bh(&sta_info->amsdu_lock); amsdu = &sta_info->amsdu_ctrl.frag[desc_num]; if (tx_skb->len > SYSADPT_AMSDU_ALLOW_SIZE) { if (amsdu->num) { mwl_tx_skb(priv, desc_num, amsdu->skb); amsdu->num = 0; amsdu->cur_pos = NULL; if (!mwl_tx_available(priv, desc_num)) { skb_queue_head(&priv->txq[desc_num], tx_skb); spin_unlock_bh(&sta_info->amsdu_lock); return NULL; } } spin_unlock_bh(&sta_info->amsdu_lock); return tx_skb; } /* potential amsdu size, should add amsdu header 14 bytes + * maximum padding 3. 
*/ wh_len = ieee80211_hdrlen(wh->frame_control); len = tx_skb->len - wh_len + 17; if (amsdu->num) { if ((amsdu->skb->len + len) > amsdu_allow_size) { mwl_tx_skb(priv, desc_num, amsdu->skb); amsdu->num = 0; amsdu->cur_pos = NULL; } } amsdu->jiffies = jiffies; len = tx_skb->len - wh_len; if (amsdu->num == 0) { struct sk_buff *newskb; amsdu_pkts = (struct sk_buff_head *) kmalloc(sizeof(*amsdu_pkts), GFP_ATOMIC); if (!amsdu_pkts) { spin_unlock_bh(&sta_info->amsdu_lock); return tx_skb; } newskb = dev_alloc_skb(amsdu_allow_size + SYSADPT_MIN_BYTES_HEADROOM); if (!newskb) { spin_unlock_bh(&sta_info->amsdu_lock); kfree(amsdu_pkts); return tx_skb; } data = newskb->data; memcpy(data, tx_skb->data, wh_len); if (sta_info->is_mesh_node) { ether_addr_copy(data + wh_len, wh->addr3); ether_addr_copy(data + wh_len + ETH_ALEN, wh->addr4); } else { ether_addr_copy(data + wh_len, ieee80211_get_DA(wh)); ether_addr_copy(data + wh_len + ETH_ALEN, ieee80211_get_SA(wh)); } *(u8 *)(data + wh_len + ETH_HLEN - 1) = len & 0xff; *(u8 *)(data + wh_len + ETH_HLEN - 2) = (len >> 8) & 0xff; memcpy(data + wh_len + ETH_HLEN, tx_skb->data + wh_len, len); skb_put(newskb, tx_skb->len + ETH_HLEN); tx_ctrl->qos_ctrl |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; amsdu_info = IEEE80211_SKB_CB(newskb); memcpy(amsdu_info, tx_info, sizeof(*tx_info)); skb_queue_head_init(amsdu_pkts); ((struct mwl_tx_ctrl *)&amsdu_info->status)->amsdu_pkts = (void *)amsdu_pkts; amsdu->skb = newskb; } else {
/* Push one frame to the firmware via tx ring desc_num: encapsulate it,
 * insert a CCMP header (with the correct IV counters) where required,
 * fill the next free tx descriptor, hand ownership to firmware and ring
 * the doorbell register.
 */
static inline void mwl_tx_skb(struct mwl_priv *priv, int desc_num,
			      struct sk_buff *tx_skb)
{
	struct ieee80211_tx_info *tx_info;
	struct mwl_tx_ctrl *tx_ctrl;
	struct mwl_tx_hndl *tx_hndl;
	struct mwl_tx_desc *tx_desc;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mwl_vif *mwl_vif;
	struct ieee80211_key_conf *k_conf;
	bool ccmp = false;
	struct mwl_dma_data *dma_data;
	struct ieee80211_hdr *wh;
	dma_addr_t dma;

	if (WARN_ON(!tx_skb))
		return;

	/* The driver stashes sta/vif/key pointers in the tx_info status
	 * area (mwl_tx_ctrl overlays it); recover them here.
	 */
	tx_info = IEEE80211_SKB_CB(tx_skb);
	tx_ctrl = (struct mwl_tx_ctrl *)&tx_info->status;
	sta = (struct ieee80211_sta *)tx_ctrl->sta;
	vif = (struct ieee80211_vif *)tx_ctrl->vif;
	mwl_vif = mwl_dev_get_vif(vif);
	k_conf = (struct ieee80211_key_conf *)tx_ctrl->k_conf;

	/* Adds the firmware tx header; sets ccmp when the key requires a
	 * CCMP header to be inserted by the driver.
	 */
	mwl_tx_encapsulate_frame(priv, tx_skb, k_conf, &ccmp);

	dma_data = (struct mwl_dma_data *)tx_skb->data;
	wh = &dma_data->wh;

	/* CCMP IV handling applies to data frames and to protected unicast
	 * management frames.
	 */
	if (ieee80211_is_data(wh->frame_control) ||
	    (ieee80211_is_mgmt(wh->frame_control) &&
	    ieee80211_has_protected(wh->frame_control) &&
	    !is_multicast_ether_addr(wh->addr1))) {
		if (is_multicast_ether_addr(wh->addr1)) {
			if (ccmp) {
				/* Multicast: use the vif's group key IV. */
				mwl_tx_insert_ccmp_hdr(dma_data->data,
						       mwl_vif->keyidx,
						       mwl_vif->iv16,
						       mwl_vif->iv32);
				INCREASE_IV(mwl_vif->iv16, mwl_vif->iv32);
			}
		} else {
			if (ccmp) {
				if (vif->type == NL80211_IFTYPE_STATION) {
					/* Station vif: single pairwise IV
					 * kept on the vif.
					 */
					mwl_tx_insert_ccmp_hdr(dma_data->data,
							       mwl_vif->keyidx,
							       mwl_vif->iv16,
							       mwl_vif->iv32);
					INCREASE_IV(mwl_vif->iv16,
						    mwl_vif->iv32);
				} else {
					/* AP/mesh: per-station IV counters. */
					struct mwl_sta *sta_info;

					sta_info = mwl_dev_get_sta(sta);
					mwl_tx_insert_ccmp_hdr(dma_data->data,
							       0,
							       sta_info->iv16,
							       sta_info->iv32);
					INCREASE_IV(sta_info->iv16,
						    sta_info->iv32);
				}
			}
		}
	}

	/* Claim the next free descriptor on this ring and fill it in.
	 * NOTE(review): caller is expected to have checked ring space
	 * (mwl_tx_available) — not re-verified here.
	 */
	tx_hndl = priv->desc_data[desc_num].pnext_tx_hndl;
	tx_hndl->psk_buff = tx_skb;
	tx_desc = tx_hndl->pdesc;
	tx_desc->tx_priority = tx_ctrl->tx_priority;
	tx_desc->qos_ctrl = cpu_to_le16(tx_ctrl->qos_ctrl);
	tx_desc->pkt_len = cpu_to_le16(tx_skb->len);
	tx_desc->packet_info = 0;
	tx_desc->data_rate = 0;
	tx_desc->type = tx_ctrl->type;
	tx_desc->xmit_control = tx_ctrl->xmit_control;
	tx_desc->sap_pkt_info = 0;
	dma = pci_map_single(priv->pdev, tx_skb->data,
			     tx_skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, dma)) {
		dev_kfree_skb_any(tx_skb);
		wiphy_err(priv->hw->wiphy,
			  "failed to map pci memory!\n");
		return;
	}
	tx_desc->pkt_ptr = cpu_to_le32(dma);
	/* Hand descriptor ownership to firmware; must happen after all
	 * other descriptor fields are written.
	 */
	tx_desc->status = cpu_to_le32(EAGLE_TXD_STATUS_FW_OWNED);
	/* make sure all the memory transactions done by cpu were completed */
	wmb();	/*Data Memory Barrier*/
	/* Doorbell: tell firmware a packet is ready. */
	writel(MACREG_H2ARIC_BIT_PPA_READY,
	       priv->iobase1 + MACREG_REG_H2A_INTERRUPT_EVENTS);
	priv->desc_data[desc_num].pnext_tx_hndl = tx_hndl->pnext;
	priv->fw_desc_cnt[desc_num]++;
}
/* mac80211 .ampdu_action callback: drive the firmware's block-ack stream
 * state machine (add/check/create/destroy BA) for tx aggregation.
 *
 * Locking: priv->stream_lock protects the stream table.  It is dropped
 * around firmware commands (mwl_fwcmd_check_ba / create_ba / destroy_ba)
 * because those may sleep, then re-taken — statement order here is
 * deliberate and must not be rearranged.
 *
 * NOTE(review): the matching "#if" for the "#endif" below (an alternate
 * signature for other kernel versions) is outside this view.
 */
static int mwl_mac80211_ampdu_action(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     enum ieee80211_ampdu_mlme_action action,
				     struct ieee80211_sta *sta,
				     u16 tid, u16 *ssn, u8 buf_size,
				     bool amsdu)
#endif
{
	int rc = 0;
	struct mwl_priv *priv = hw->priv;
	struct mwl_ampdu_stream *stream;
	u8 *addr = sta->addr, idx;
	struct mwl_sta *sta_info;

	sta_info = mwl_dev_get_sta(sta);

	spin_lock_bh(&priv->stream_lock);

	stream = mwl_fwcmd_lookup_stream(hw, addr, tid);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Rx aggregation is handled entirely by firmware. */
		break;
	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->is_ampdu_allowed) {
			wiphy_warn(hw->wiphy, "ampdu not allowed\n");
			rc = -EPERM;
			break;
		}
		if (!stream) {
			stream = mwl_fwcmd_add_stream(hw, sta, tid);
			if (!stream) {
				wiphy_warn(hw->wiphy, "no stream found\n");
				rc = -EPERM;
				break;
			}
		}
		/* Firmware command may sleep: drop the lock around it. */
		spin_unlock_bh(&priv->stream_lock);
		rc = mwl_fwcmd_check_ba(hw, stream, vif);
		spin_lock_bh(&priv->stream_lock);
		if (rc) {
			mwl_fwcmd_remove_stream(hw, stream);
			wiphy_err(hw->wiphy,
				  "ampdu start error code: %d\n", rc);
			rc = -EPERM;
			break;
		}
		stream->state = AMPDU_STREAM_IN_PROGRESS;
		*ssn = 0;
		ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		if (stream) {
			if (stream->state == AMPDU_STREAM_ACTIVE) {
				/* Flush queued ampdu frames, then destroy the
				 * BA in firmware (lock dropped; keep only the
				 * stream index, not the pointer, across it).
				 */
				mwl_tx_del_ampdu_pkts(hw, sta, tid);
				idx = stream->idx;
				spin_unlock_bh(&priv->stream_lock);
				mwl_fwcmd_destroy_ba(hw, idx);
				spin_lock_bh(&priv->stream_lock);
			}

			mwl_fwcmd_remove_stream(hw, stream);
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		} else {
			rc = -EPERM;
		}
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		if (stream) {
			if (WARN_ON(stream->state !=
				    AMPDU_STREAM_IN_PROGRESS)) {
				rc = -EPERM;
				break;
			}
			/* Firmware command may sleep: drop the lock. */
			spin_unlock_bh(&priv->stream_lock);
			rc = mwl_fwcmd_create_ba(hw, stream, buf_size, vif);
			spin_lock_bh(&priv->stream_lock);

			if (!rc) {
				stream->state = AMPDU_STREAM_ACTIVE;
			} else {
				/* create_ba failed: tear the BA back down. */
				idx = stream->idx;
				spin_unlock_bh(&priv->stream_lock);
				mwl_fwcmd_destroy_ba(hw, idx);
				spin_lock_bh(&priv->stream_lock);
				mwl_fwcmd_remove_stream(hw, stream);
				wiphy_err(hw->wiphy,
					  "ampdu operation error code: %d\n",
					  rc);
			}
		} else {
			rc = -EPERM;
		}
		break;
	default:
		rc = -ENOTSUPP;
		break;
	}

	spin_unlock_bh(&priv->stream_lock);

	return rc;
}
void pcie_tx_xmit_ndp(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct mwl_priv *priv = hw->priv; struct pcie_priv *pcie_priv = priv->hif.priv; struct ieee80211_tx_info *tx_info; struct ieee80211_key_conf *k_conf; struct mwl_vif *mwl_vif; int index; struct ieee80211_sta *sta; struct mwl_sta *sta_info; struct ieee80211_hdr *wh; u8 *da; u16 qos; u8 tid = 0; struct mwl_ampdu_stream *stream = NULL; u16 tx_que_priority; bool mgmtframe = false; struct ieee80211_mgmt *mgmt; bool eapol_frame = false; bool start_ba_session = false; struct pcie_tx_ctrl_ndp *tx_ctrl; tx_info = IEEE80211_SKB_CB(skb); k_conf = tx_info->control.hw_key; mwl_vif = mwl_dev_get_vif(tx_info->control.vif); index = skb_get_queue_mapping(skb); sta = control->sta; sta_info = sta ? mwl_dev_get_sta(sta) : NULL; wh = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(wh->frame_control)) qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh))); else qos = 0xFFFF; if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { index = IEEE80211_AC_VO; eapol_frame = true; } if (ieee80211_is_mgmt(wh->frame_control)) { mgmtframe = true; mgmt = (struct ieee80211_mgmt *)skb->data; } if (mgmtframe) { u16 capab; if (unlikely(ieee80211_is_action(wh->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK && mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)) { capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; index = utils_tid_to_ac(tid); } if (unlikely(ieee80211_is_assoc_req(wh->frame_control))) utils_add_basic_rates(hw->conf.chandef.chan->band, skb); if (ieee80211_is_probe_req(wh->frame_control) || ieee80211_is_probe_resp(wh->frame_control)) tx_que_priority = PROBE_RESPONSE_TXQNUM; else { if (( (mwl_vif->macid == SYSADPT_NUM_OF_AP) && (!ieee80211_has_protected(wh->frame_control) || (ieee80211_has_protected(wh->frame_control) && ieee80211_is_auth(wh->frame_control))) ) || !sta || 
ieee80211_is_auth(wh->frame_control) || ieee80211_is_assoc_req(wh->frame_control) || ieee80211_is_assoc_resp(wh->frame_control)) tx_que_priority = MGMT_TXQNUM; else { if (is_multicast_ether_addr(wh->addr1) && (mwl_vif->macid != SYSADPT_NUM_OF_AP)) tx_que_priority = mwl_vif->macid * SYSADPT_MAX_TID; else tx_que_priority = SYSADPT_MAX_TID * (sta_info->stnid + QUEUE_STAOFFSET) + 6; } } if (ieee80211_is_assoc_resp(wh->frame_control) || ieee80211_is_reassoc_resp(wh->frame_control)) { struct sk_buff *ack_skb; struct ieee80211_tx_info *ack_info; ack_skb = skb_copy(skb, GFP_ATOMIC); ack_info = IEEE80211_SKB_CB(ack_skb); pcie_tx_prepare_info(priv, 0, ack_info); ieee80211_tx_status(hw, ack_skb); } pcie_tx_encapsulate_frame(priv, skb, k_conf, NULL); } else {