int
ieee80211_crypto_handle_keymiss(struct ieee80211com *ic, wbuf_t wbuf,
    struct ieee80211_rx_status *rs)
{
    struct ieee80211_node *ni = NULL;

    /*
     * Handle packets with keycache miss
     */
    if ((rs->rs_flags & IEEE80211_RX_KEYMISS)) {
        ni = ieee80211_find_rxnode(ic,
            (const struct ieee80211_frame_min *)wbuf_header(wbuf));
        if (ni != NULL) {
            struct ieee80211vap *vap = ni->ni_vap;

            ieee80211_key_update_begin(vap);
            if (!ieee80211_crypto_keymiss(ni, wbuf, rs)) {
                IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
                    "%s: Couldn't decrypt, dropping packet.\n", __func__);
                wbuf_free(wbuf);
                ieee80211_key_update_end(vap);
                ieee80211_free_node(ni);
                return 1;
            } else {
                ieee80211_key_update_end(vap);
                ieee80211_free_node(ni);
            }
        }
    }
    return 0;
}
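/*
 * Illustrative sketch (not part of the original source; the function
 * name is hypothetical): how a driver RX path might use
 * ieee80211_crypto_handle_keymiss().  The key point is the ownership
 * rule -- a non-zero return means the wbuf has already been freed
 * inside the helper and must not be touched again.
 */
static int
example_rx_frame(struct ieee80211com *ic, wbuf_t wbuf,
    struct ieee80211_rx_status *rs)
{
    if (ieee80211_crypto_handle_keymiss(ic, wbuf, rs)) {
        /* wbuf was freed by the keymiss path; do not touch it again */
        return 0;
    }
    /* wbuf is still owned here; normal input processing would follow */
    return 1;
}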
static void
ath_handle_micerror(struct ieee80211com *ic, struct ieee80211_frame *wh,
    int keyix)
{
    struct ieee80211_node *ni;

    /* XXX recheck MIC to deal w/ chips that lie */
    /* XXX discard MIC errors on !data frames */
    ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *)wh);
    if (ni != NULL) {
        ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
        ieee80211_free_node(ni);
    }
}
/*
 * This function determines whether the received frame is a valid UAPSD
 * trigger.  Called from interrupt context or DPC context depending on
 * the isr_context parameter.
 */
bool
ath_net80211_check_uapsdtrigger(ieee80211_handle_t ieee,
    struct ieee80211_qosframe *qwh, u_int16_t keyix, bool isr_context)
{
    struct ieee80211com *ic = NET80211_HANDLE(ieee);
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ieee80211_node *ni;
    int tid, ac;
    u_int16_t frame_seq;
    int queue_depth;
    bool isapsd = false;

    /*
     * Locate the node for the sender.
     */
    IEEE80211_KEYMAP_LOCK(scn);
    ni = (keyix != HAL_RXKEYIX_INVALID) ? scn->sc_keyixmap[keyix] : NULL;
    if (ni == NULL) {
        IEEE80211_KEYMAP_UNLOCK(scn);
        /*
         * No key index or no entry, do a lookup.
         */
        if (isr_context) {
            ni = ieee80211_find_rxnode_nolock(ic,
                (struct ieee80211_frame_min *)qwh);
        } else {
            ni = ieee80211_find_rxnode(ic,
                (struct ieee80211_frame_min *)qwh);
        }
        if (ni == NULL) {
            return isapsd;
        }
    } else {
        ieee80211_ref_node(ni);
        IEEE80211_KEYMAP_UNLOCK(scn);
    }

    if (!(ni->ni_flags & IEEE80211_NODE_UAPSD))
        goto end;
    if (ni->ni_flags & IEEE80211_NODE_UAPSD_SP)
        goto end;

    /*
     * Must deal with change of state here, since otherwise there would
     * be a race (on two quick frames from the STA) between this code and
     * the tasklet where we would:
     *   - miss a trigger on entry to PS if we're already trigger hunting
     *   - generate a spurious SP on exit (due to a frame following the
     *     exit frame)
     */
    if ((((qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) == IEEE80211_FC1_PWR_MGT) ^
         ((ni->ni_flags & IEEE80211_NODE_UAPSD_TRIG) ==
          IEEE80211_NODE_UAPSD_TRIG))) {
        ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
        if (qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) {
            WME_UAPSD_NODE_TRIGSEQINIT(ni);
            ni->ni_stats.ns_uapsd_triggerenabled++;
            ni->ni_flags |= IEEE80211_NODE_UAPSD_TRIG;
        } else {
            /*
             * Node transitioned from UAPSD -> Active state.
             * Flush out UAPSD frames.
             */
            ni->ni_stats.ns_uapsd_active++;
            ni->ni_flags &= ~IEEE80211_NODE_UAPSD_TRIG;
            scn->sc_ops->process_uapsd_trigger(scn->sc_dev,
                ATH_NODE_NET80211(ni)->an_sta, WME_UAPSD_NODE_MAXQDEPTH,
                0, 1, WME_UAPSD_NODE_MAXQDEPTH);
        }
        goto end;
    }

    /*
     * Check for a valid trigger frame, i.e. QoS Data or QoS Null.
     */
    if (((qwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
         IEEE80211_FC0_TYPE_DATA) ||
        !(qwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)) {
        goto end;
    }

    if (((qwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
         IEEE80211_FC0_TYPE_DATA) &&
        (((qwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
          IEEE80211_FC0_SUBTYPE_QOS) ||
         ((qwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
          IEEE80211_FC0_SUBTYPE_QOS_NULL))) {
        tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
        ac = TID_TO_WME_AC(tid);
        isapsd = true;

        if (WME_UAPSD_AC_CAN_TRIGGER(ac, ni)) {
            /*
             * Detect duplicate triggers and drop if so.
             */
            frame_seq = le16toh(*(u_int16_t *)qwh->i_seq);
            if ((qwh->i_fc[1] & IEEE80211_FC1_RETRY) &&
                frame_seq == ni->ni_uapsd_trigseq[ac]) {
                ni->ni_stats.ns_uapsd_duptriggers++;
                goto end;
            }

            /*
             * SP in progress for this node, discard trigger.
             */
            if (ni->ni_flags & IEEE80211_NODE_UAPSD_SP) {
                ni->ni_stats.ns_uapsd_ignoretriggers++;
                goto end;
            }

            /* start the SP */
            ni->ni_stats.ns_uapsd_triggers++;
            ni->ni_flags |= IEEE80211_NODE_UAPSD_SP;
            ni->ni_uapsd_trigseq[ac] = frame_seq;

            queue_depth = scn->sc_ops->process_uapsd_trigger(scn->sc_dev,
                ATH_NODE_NET80211(ni)->an_sta, ni->ni_uapsd_maxsp,
                ac, 0, WME_UAPSD_NODE_MAXQDEPTH);

            if (!queue_depth &&
                (ni->ni_vap->iv_set_tim != NULL) &&
                IEEE80211_NODE_UAPSD_USETIM(ni)) {
                ni->ni_vap->iv_set_tim(ni, 0);
            }
        }
    }
end:
    ieee80211_free_node(ni);
    return isapsd;
}
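/*
 * Minimal sketch (illustrative, not part of the driver; the helper
 * name is an assumption): the two-stage trigger classification above
 * reduces to this pure predicate on the first frame-control byte.
 * Only QoS Data and QoS Null subtypes can act as UAPSD triggers.
 */
static inline int
example_is_uapsd_trigger_candidate(u_int8_t fc0)
{
    if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA)
        return 0;
    return ((fc0 & IEEE80211_FC0_SUBTYPE_MASK) ==
            IEEE80211_FC0_SUBTYPE_QOS) ||
           ((fc0 & IEEE80211_FC0_SUBTYPE_MASK) ==
            IEEE80211_FC0_SUBTYPE_QOS_NULL);
}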
static void
arn_rx_handler(struct arn_softc *sc)
{
#define PA2DESC(_sc, _pa) \
    ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
    ((_pa) - (_sc)->sc_desc_dma.cookie.dmac_address)))

    ieee80211com_t *ic = (ieee80211com_t *)sc;
    struct ath_buf *bf;
    struct ath_hal *ah = sc->sc_ah;
    struct ath_desc *ds;
    struct ath_rx_status *rs;
    mblk_t *rx_mp;
    struct ieee80211_frame *wh;
    int32_t len, ngood, loop = 1;
    uint8_t phyerr;
    int status;
    struct ieee80211_node *in;

    ngood = 0;
    do {
        mutex_enter(&sc->sc_rxbuflock);
        bf = list_head(&sc->sc_rxbuf_list);
        if (bf == NULL) {
            ARN_DBG((ARN_DBG_RECV, "arn: arn_rx_handler(): "
                "no buffer\n"));
            mutex_exit(&sc->sc_rxbuflock);
            break;
        }
        ASSERT(bf->bf_dma.cookie.dmac_address != NULL);
        ds = bf->bf_desc;
        if (ds->ds_link == bf->bf_daddr) {
            /*
             * Never process the self-linked entry at the end;
             * this may be encountered under heavy load.
             */
            mutex_exit(&sc->sc_rxbuflock);
            break;
        }

        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on.  All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        status = ath9k_hw_rxprocdesc(ah, ds, bf->bf_daddr,
            PA2DESC(sc, ds->ds_link), 0);
        if (status == EINPROGRESS) {
            mutex_exit(&sc->sc_rxbuflock);
            break;
        }
        list_remove(&sc->sc_rxbuf_list, bf);
        mutex_exit(&sc->sc_rxbuflock);

        rs = &ds->ds_rxstat;
        if (rs->rs_status != 0) {
            if (rs->rs_status & ATH9K_RXERR_CRC)
                sc->sc_stats.ast_rx_crcerr++;
            if (rs->rs_status & ATH9K_RXERR_FIFO)
                sc->sc_stats.ast_rx_fifoerr++;
            if (rs->rs_status & ATH9K_RXERR_DECRYPT)
                sc->sc_stats.ast_rx_badcrypt++;
            if (rs->rs_status & ATH9K_RXERR_PHY) {
                sc->sc_stats.ast_rx_phyerr++;
                phyerr = rs->rs_phyerr & 0x1f;
                sc->sc_stats.ast_rx_phy[phyerr]++;
            }
            goto rx_next;
        }

        len = rs->rs_datalen;
        /* less than sizeof(struct ieee80211_frame) */
        if (len < 20) {
            sc->sc_stats.ast_rx_tooshort++;
            goto rx_next;
        }

        if ((rx_mp = allocb(sc->sc_dmabuf_size, BPRI_MED)) == NULL) {
            arn_problem("arn: arn_rx_handler(): "
                "allocating mblk buffer failed.\n");
            return;
        }

        ARN_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU);

        bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len);
        rx_mp->b_wptr += len;
        wh = (struct ieee80211_frame *)rx_mp->b_rptr;
        if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
            IEEE80211_FC0_TYPE_CTL) {
            /*
             * Ignore control frames received in promisc mode.
             */
            freemsg(rx_mp);
            goto rx_next;
        }

        /* Remove the CRC at the end of the IEEE80211 frame */
        rx_mp->b_wptr -= IEEE80211_CRC_LEN;

#ifdef DEBUG
        arn_printrxbuf(bf, status == 0);
#endif

        /*
         * Locate the node for the sender, track state, and then
         * pass the (referenced) node up to the 802.11 layer
         * for its use.
         */
        in = ieee80211_find_rxnode(ic, wh);

        /*
         * Send the frame to net80211 for processing.
         */
        (void) ieee80211_input(ic, rx_mp, in, rs->rs_rssi, rs->rs_tstamp);

        /* release node */
        ieee80211_free_node(in);

        /*
         * Arrange to update the last rx timestamp only for
         * frames from our AP when operating in station mode.
         * This assumes the rx key is always set up when associated.
         */
        if (ic->ic_opmode == IEEE80211_M_STA &&
            rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
            ngood++;

        /*
         * Change the default rx antenna if rx diversity chooses the
         * other antenna 3 times in a row.
         */
        if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
            if (++sc->sc_rxotherant >= 3) {
                ath9k_hw_setantenna(sc->sc_ah,
                    ds->ds_rxstat.rs_antenna);
                sc->sc_defant = ds->ds_rxstat.rs_antenna;
                sc->sc_rxotherant = 0;
            }
        } else {
            sc->sc_rxotherant = 0;
        }

rx_next:
        mutex_enter(&sc->sc_rxbuflock);
        list_insert_tail(&sc->sc_rxbuf_list, bf);
        mutex_exit(&sc->sc_rxbuflock);
        arn_rx_buf_link(sc, bf);
    } while (loop);

    if (ngood)
        sc->sc_lastrx = ath9k_hw_gettsf64(ah);

#undef PA2DESC
}
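/*
 * Illustrative sketch (assumed shape, not the actual arn_rx_buf_link()
 * from this driver; the sc_rxlink bookkeeping field is an assumption):
 * how an RX buffer is typically chained back into the self-linked
 * descriptor list that arn_rx_handler() walks.  The tail descriptor
 * points at itself, so the DMA engine parks there instead of
 * overrunning the ring when the host falls behind.
 */
static void
example_rx_buf_link(struct arn_softc *sc, struct ath_buf *bf)
{
    struct ath_desc *ds = bf->bf_desc;

    /* self-link: the DMA engine idles here until more buffers arrive */
    ds->ds_link = bf->bf_daddr;
    ds->ds_data = bf->bf_dma.cookie.dmac_address;

    /* splice this descriptor after the previous tail */
    if (sc->sc_rxlink != NULL)
        *sc->sc_rxlink = bf->bf_daddr;
    sc->sc_rxlink = &ds->ds_link;
}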
void
an_rxeof(struct an_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ifnet *ifp = &ic->ic_if;
    struct ieee80211_frame *wh;
    struct ieee80211_rxinfo rxi;
    struct ieee80211_node *ni;
    struct an_rxframe frmhdr;
    struct mbuf *m;
    u_int16_t status;
    int fid, gaplen, len, off;
    uint8_t *gap;

    fid = CSR_READ_2(sc, AN_RX_FID);

    /* First read in the frame header */
    if (an_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr),
        sizeof(frmhdr)) != 0) {
        CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
        ifp->if_ierrors++;
        DPRINTF(("an_rxeof: read fid %x failed\n", fid));
        return;
    }

    an_swap16((u_int16_t *)&frmhdr.an_whdr,
        sizeof(struct ieee80211_frame) / 2);

    status = frmhdr.an_rx_status;
    if ((status & AN_STAT_ERRSTAT) != 0 &&
        ic->ic_opmode != IEEE80211_M_MONITOR) {
        CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
        ifp->if_ierrors++;
        DPRINTF(("an_rxeof: fid %x status %x\n", fid, status));
        return;
    }

    /* the payload length field includes a 16-bit "mystery field" */
    len = frmhdr.an_rx_payload_len - sizeof(uint16_t);
    off = ALIGN(sizeof(struct ieee80211_frame));

    if (off + len > MCLBYTES) {
        if (ic->ic_opmode != IEEE80211_M_MONITOR) {
            CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
            ifp->if_ierrors++;
            DPRINTF(("an_rxeof: oversized packet %d\n", len));
            return;
        }
        len = 0;
    }

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL) {
        CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
        ifp->if_ierrors++;
        DPRINTF(("an_rxeof: MGET failed\n"));
        return;
    }
    if (off + len + AN_GAPLEN_MAX > MHLEN) {
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
            m_freem(m);
            ifp->if_ierrors++;
            DPRINTF(("an_rxeof: MCLGET failed\n"));
            return;
        }
    }
    m->m_data += off - sizeof(struct ieee80211_frame);

    if (ic->ic_opmode != IEEE80211_M_MONITOR) {
        gaplen = frmhdr.an_gaplen;
        if (gaplen > AN_GAPLEN_MAX) {
            CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
            m_freem(m);
            ifp->if_ierrors++;
            DPRINTF(("%s: gap too long\n", __func__));
            return;
        }
        /*
         * We don't need the 16-bit mystery field (payload length?),
         * so read it into the region reserved for the 802.11 header.
         *
         * When Cisco Aironet 350 cards w/ firmware version 5 or
         * greater operate with certain Cisco 350 APs,
         * the "gap" is filled with the SNAP header.  Read
         * it in after the 802.11 header.
         */
        gap = m->m_data + sizeof(struct ieee80211_frame) -
            sizeof(uint16_t);
        an_read_bap(sc, fid, -1, gap, gaplen + sizeof(u_int16_t),
            gaplen + sizeof(u_int16_t));
    } else
        gaplen = 0;

    an_read_bap(sc, fid, -1,
        m->m_data + sizeof(struct ieee80211_frame) + gaplen, len, len);
    an_swap16((u_int16_t *)(m->m_data + sizeof(struct ieee80211_frame) +
        gaplen), (len + 1) / 2);
    m->m_pkthdr.len = m->m_len =
        sizeof(struct ieee80211_frame) + gaplen + len;

    memcpy(m->m_data, &frmhdr.an_whdr, sizeof(struct ieee80211_frame));

    CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);

#if NBPFILTER > 0
    if (sc->sc_drvbpf) {
        struct mbuf mb;
        struct an_rx_radiotap_header *tap = &sc->sc_rxtap;

        tap->ar_rate = frmhdr.an_rx_rate;
        tap->ar_antsignal = frmhdr.an_rx_signal_strength;
        tap->ar_chan_freq = ic->ic_bss->ni_chan->ic_freq;
        tap->ar_chan_flags = ic->ic_bss->ni_chan->ic_flags;

        mb.m_data = (caddr_t)tap;
        mb.m_len = sizeof(sc->sc_rxtapu);
        mb.m_next = m;
        mb.m_nextpkt = NULL;
        mb.m_type = 0;
        mb.m_flags = 0;
        bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
    }
#endif /* NBPFILTER > 0 */

    wh = mtod(m, struct ieee80211_frame *);
    rxi.rxi_flags = 0;
    if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
        /*
         * WEP is decrypted by hardware.  Clear the WEP bit in the
         * header for ieee80211_input().
         */
        wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
        rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
    }
    ni = ieee80211_find_rxnode(ic, wh);
    rxi.rxi_rssi = frmhdr.an_rx_signal_strength;
    rxi.rxi_tstamp = an_switch32(frmhdr.an_rx_time);
    ieee80211_input(ifp, m, ni, &rxi);
    ieee80211_release_node(ic, ni);
}
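/*
 * Informational sketch (reconstructed from an_rxeof() above, not from
 * the original source; the helper name is an assumption).  The mbuf
 * handed to ieee80211_input() ends up laid out as
 *
 *   [802.11 header][gap (0..AN_GAPLEN_MAX bytes)][payload (len bytes)]
 *
 * The 16-bit "mystery field" preceding the payload is read into the
 * tail of the reserved header region and then overwritten when the
 * header saved in frmhdr.an_whdr is copied back over m->m_data.  This
 * helper restates the pointer arithmetic that makes that work.
 */
static uint8_t *
example_an_gap_ptr(struct mbuf *m)
{
    /*
     * Start the gap read two bytes before the end of the header
     * region, so the unwanted 16-bit field lands on bytes that the
     * later memcpy() of the saved 802.11 header overwrites.
     */
    return mtod(m, uint8_t *) + sizeof(struct ieee80211_frame) -
        sizeof(uint16_t);
}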
/*
 * This function is called for each frame received on the high-priority
 * queue.  If the hardware has classified this frame as a UAPSD trigger,
 * we locate the node and deliver data.  For non-trigger frames, we check
 * for a PM transition.  Called from interrupt context.
 */
void
ath_net80211_uapsd_deliverdata(ieee80211_handle_t ieee,
    struct ieee80211_qosframe *qwh, u_int16_t keyix, u_int8_t is_trig,
    bool isr_context)
{
    struct ieee80211com *ic = NET80211_HANDLE(ieee);
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ieee80211_node *ni;
    int tid, ac;
    u_int16_t frame_seq;
    int queue_depth;
    bool sent_eosp = false;

    UNREFERENCED_PARAMETER(isr_context);

    /*
     * Locate the node for the sender.
     */
    IEEE80211_KEYMAP_LOCK(scn);
    ni = (keyix != HAL_RXKEYIX_INVALID) ? scn->sc_keyixmap[keyix] : NULL;
    IEEE80211_KEYMAP_UNLOCK(scn);
    if (ni == NULL) {
        /*
         * No key index or no entry, do a lookup.
         */
        ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)qwh);
        if (ni == NULL) {
            return;
        }
    } else {
        ieee80211_ref_node(ni);
    }

    if (!(ni->ni_flags & IEEE80211_NODE_UAPSD) ||
        (ni->ni_flags & IEEE80211_NODE_ATH_PAUSED))
        goto end;

    /* We cannot have a PM state change if this is a trigger frame */
    if (!is_trig) {
        /*
         * Must deal with change of state here, since otherwise there
         * would be a race (on two quick frames from the STA) between
         * this code and the tasklet where we would:
         *   - miss a trigger on entry to PS if we're already trigger
         *     hunting
         *   - generate a spurious SP on exit (due to a frame following
         *     the exit frame)
         */
        if ((((qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ==
              IEEE80211_FC1_PWR_MGT) ^
             ((ni->ni_flags & IEEE80211_NODE_UAPSD_TRIG) ==
              IEEE80211_NODE_UAPSD_TRIG))) {
            ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
            if (qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) {
                WME_UAPSD_NODE_TRIGSEQINIT(ni);
                ni->ni_stats.ns_uapsd_triggerenabled++;
                ni->ni_flags |= IEEE80211_NODE_UAPSD_TRIG;
            } else {
                /*
                 * Node transitioned from UAPSD -> Active state.
                 * Flush out UAPSD frames.
                 */
                ni->ni_stats.ns_uapsd_active++;
                ni->ni_flags &= ~IEEE80211_NODE_UAPSD_TRIG;
                scn->sc_ops->process_uapsd_trigger(scn->sc_dev,
                    ATH_NODE_NET80211(ni)->an_sta,
                    WME_UAPSD_NODE_MAXQDEPTH, 0, 1, &sent_eosp,
                    WME_UAPSD_NODE_MAXQDEPTH);
            }
            goto end;
        }
    } else {
        /* Is a UAPSD trigger */
        tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
        ac = TID_TO_WME_AC(tid);

        if (WME_UAPSD_AC_CAN_TRIGGER(ac, ni)) {
            /*
             * Detect duplicate triggers and drop if so.
             */
            frame_seq = le16toh(*(u_int16_t *)qwh->i_seq);
            if ((qwh->i_fc[1] & IEEE80211_FC1_RETRY) &&
                frame_seq == ni->ni_uapsd_trigseq[ac]) {
                ni->ni_stats.ns_uapsd_duptriggers++;
                DPRINTF(scn, ATH_DEBUG_UAPSD,
                    "%s : Drop duplicate trigger\n", __func__);
                goto end;
            }

            if (IEEE80211_IS_TDLS_NODE(ni)) {
                /*
                 * TDLS defines any QoS frame with EOSP set to be a
                 * non-trigger frame.  Therefore, ignore the trigger.
                 */
                if (qwh->i_qos[0] & IEEE80211_QOS_EOSP) {
                    ni->ni_stats.ns_uapsd_ignoretriggers++;
                    DPRINTF(scn, ATH_DEBUG_UAPSD,
                        "%s : TDLS QOS frame with EOSP; ignore trigger\n",
                        __func__);
                    goto end;
                }
            }

            /*
             * SP in progress for this node, discard trigger.
             */
            if (ni->ni_flags & IEEE80211_NODE_UAPSD_SP) {
                ni->ni_stats.ns_uapsd_ignoretriggers++;
                DPRINTF(scn, ATH_DEBUG_UAPSD,
                    "%s : SP in-progress; ignore trigger\n", __func__);
                goto end;
            }

            ni->ni_stats.ns_uapsd_triggers++;
            DPRINTF(scn, ATH_DEBUG_UAPSD, "%s : Start SP\n", __func__);

            queue_depth = scn->sc_ops->process_uapsd_trigger(scn->sc_dev,
                ATH_NODE_NET80211(ni)->an_sta, ni->ni_uapsd_maxsp,
                ac, 0, &sent_eosp, WME_UAPSD_NODE_MAXQDEPTH);
            if (queue_depth == -1)
                goto end;

            /* start the SP */
            ni->ni_flags |= IEEE80211_NODE_UAPSD_SP;
            ni->ni_uapsd_trigseq[ac] = frame_seq;

            if (IEEE80211_IS_TDLS_NODE(ni)) {
                if (sent_eosp) {
                    ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
                    DPRINTF(scn, ATH_DEBUG_UAPSD,
                        "%s : TDLS; End SP\n", __func__);
                }
            } else {
                if (!queue_depth &&
                    (ni->ni_vap->iv_set_tim != NULL) &&
                    IEEE80211_NODE_UAPSD_USETIM(ni)) {
                    ni->ni_vap->iv_set_tim(ni, 0, isr_context);
                }
            }
        }
    }
end:
    ieee80211_free_node(ni);
    return;
}
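/*
 * Minimal sketch (illustrative; the helper name is an assumption): the
 * power-save state-change test used in both UAPSD paths above is an
 * XOR of two booleans -- the PM bit in the received frame and the
 * node's cached trigger-enabled flag.  When they disagree, the STA has
 * changed power-save state since the last frame we classified.
 */
static inline int
example_uapsd_ps_state_changed(const struct ieee80211_qosframe *qwh,
    const struct ieee80211_node *ni)
{
    int frame_ps = (qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) != 0;
    int node_trig = (ni->ni_flags & IEEE80211_NODE_UAPSD_TRIG) != 0;

    return frame_ps ^ node_trig;
}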