void wlRecv(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int work_done = 0;
    wlrxdesc_t *pCurrent =
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc;
    static Bool_e isFunctionBusy = WL_FALSE;
    int receivedHandled = 0;
    u_int32_t rxRdPtr;
    u_int32_t rxWrPtr;
    struct sk_buff *pRxSkBuff = NULL;
    WL_BUFF *wlb = NULL;
    void *pCurrentData;
    u_int8_t rxRate;
    int rxCount;
    int rssi;
    vmacApInfo_t *vmacSta_p = wlpptr->vmacSta_p;
    u_int32_t status;
    u_int32_t rssi_paths;

    WLDBG_ENTER(DBG_LEVEL_14);

    /*
     * In a corner case the descriptors may be uninitialized and not
     * usable; accessing them could cause a crash.
     */
    if (isFunctionBusy || (pCurrent == NULL)) {
        return;
    }
    isFunctionBusy = WL_TRUE;

    rxRdPtr = readl(wlpptr->ioBase0 +
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
    rxWrPtr = readl(wlpptr->ioBase0 +
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescWrite);

    while ((pCurrent->RxControl == EAGLE_RXD_CTRL_DMA_OWN) &&
        (work_done < vmacSta_p->work_to_do)) {
        /* AUTOCHANNEL */
        if (vmacSta_p->StopTraffic)
            goto out;
        /* AUTOCHANNEL */

        rxCount = ENDIAN_SWAP16(pCurrent->PktLen);
        pRxSkBuff = pCurrent->pSkBuff;
        if (pRxSkBuff == NULL) {
            goto out;
        }
        pci_unmap_single(wlpptr->pPciDev,
            ENDIAN_SWAP32(pCurrent->pPhysBuffData),
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
            PCI_DMA_FROMDEVICE);
        pCurrentData = pCurrent->pBuffData;
        rxRate = pCurrent->Rate;
        status = (u_int32_t)pCurrent->Status;
        pRxSkBuff->protocol = 0;
        if (pCurrent->QosCtrl & IEEE_QOS_CTL_AMSDU) {
            pRxSkBuff->protocol |= WL_WLAN_TYPE_AMSDU;
        }
        rssi = (int)pCurrent->RSSI + W836X_RSSI_OFFSET;
        rssi_paths = *((u_int32_t *)&pCurrent->HwRssiInfo);

        if (skb_tailroom(pRxSkBuff) >= rxCount) {
            skb_put(pRxSkBuff, rxCount);
            skb_pull(pRxSkBuff, 2);
        } else {
            WLDBG_INFO(DBG_LEVEL_14,
                "Not enough tail room =%x recvlen=%x, pCurrent=%x, pCurrentData=%x",
                WL_BUFF_TAILROOM(pRxSkBuff), rxCount, pCurrent, pCurrentData);
            WL_SKB_FREE(pRxSkBuff);
            goto out;
        }

        wlpptr->netDevStats->rx_packets++;
        wlb = WL_BUFF_PTR(pRxSkBuff);
        WL_PREPARE_BUF_INFO(pRxSkBuff);
        if (pCurrent->HtSig2 & 0x8) {
            u_int8_t ampdu_qos;
            /* bit 3 carries the AMPDU flag, bits 0-2 the QoS TID, to save a register */
            ampdu_qos = 8 | (pCurrent->QosCtrl & 0x7);
            work_done += ieee80211_input(wlpptr, wlb, rssi, rssi_paths,
                ampdu_qos, status);
        } else {
            u_int8_t ampdu_qos;
            /* bit 3 carries the AMPDU flag, bits 0-2 the QoS TID, to save a register */
            ampdu_qos = 0 | (pCurrent->QosCtrl & 0x7);
            work_done += ieee80211_input(wlpptr, wlb, rssi, rssi_paths,
                ampdu_qos, status);
        }
        wlpptr->netDevStats->rx_bytes += pRxSkBuff->len;

        /* Refill the descriptor with a fresh buffer and rearm it. */
        pCurrent->pSkBuff = dev_alloc_skb(
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
        if (pCurrent->pSkBuff != NULL) {
            if (skb_linearize(pCurrent->pSkBuff)) {
                WL_SKB_FREE(pCurrent->pSkBuff);
                printk(KERN_ERR "%s: need to linearize skb\n",
                    netdev->name);
                goto out;
            }
            skb_reserve(pCurrent->pSkBuff, MIN_BYTES_HEADROOM);
            pCurrent->Status = EAGLE_RXD_STATUS_OK;
            pCurrent->QosCtrl = 0x0000;
            pCurrent->Channel = 0x00;
            pCurrent->RSSI = 0x00;
            pCurrent->SQ2 = 0x00;
            pCurrent->PktLen = 6 * netdev->mtu + NUM_EXTRA_RX_BYTES;
            pCurrent->pBuffData = pCurrent->pSkBuff->data;
            pCurrent->pPhysBuffData =
                ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev,
                    pCurrent->pSkBuff->data,
                    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize
                    /* + sizeof(struct skb_shared_info) */,
                    PCI_DMA_BIDIRECTIONAL));
        }
out:
        receivedHandled++;
        pCurrent->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
        pCurrent->QosCtrl = 0;
        rxRdPtr = ENDIAN_SWAP32(pCurrent->pPhysNext);
        pCurrent = pCurrent->pNext;
    }
    writel(rxRdPtr, wlpptr->ioBase0 +
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = pCurrent;
    isFunctionBusy = WL_FALSE;
    WLDBG_EXIT(DBG_LEVEL_14);
}
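
/*
 * A minimal, self-contained sketch (not driver code) of the ownership
 * handoff wlRecv() relies on: each rx-ring descriptor carries an owner
 * field that the DMA engine flips when a frame lands and the driver
 * flips back after the slot has been refilled.  All names here
 * (rx_desc, OWNER_*, process_frame, rx_ring_poll) are hypothetical.
 */
enum rx_owner { OWNER_DMA, OWNER_DRIVER };

struct rx_desc {
    enum rx_owner owner;        /* who may touch this slot right now */
    void *buf;                  /* receive buffer for the slot */
    struct rx_desc *next;       /* circular chain, like pCurrent->pNext */
};

/* Stand-in for the real per-frame work (unmap, input, refill). */
static void process_frame(struct rx_desc *d) { (void)d; }

/*
 * Walk the ring until reaching a slot the hardware still owns or the
 * budget (wlRecv's work_to_do) is exhausted; return the number of
 * frames handled so the caller can reschedule if it ran out of budget.
 */
static int rx_ring_poll(struct rx_desc *cur, int budget)
{
    int handled = 0;

    while (cur->owner == OWNER_DMA && handled < budget) {
        process_frame(cur);
        /* Hand the slot back, as EAGLE_RXD_CTRL_DRIVER_OWN does. */
        cur->owner = OWNER_DRIVER;
        cur = cur->next;
        handled++;
    }
    return handled;
}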

void rtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
    struct rtwn_usb_softc *uc = usbd_xfer_softc(xfer);
    struct rtwn_softc *sc = &uc->uc_sc;
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211_node *ni;
    struct mbuf *m = NULL, *next;
    struct rtwn_data *data;
    int8_t nf, rssi;

    RTWN_ASSERT_LOCKED(sc);

    switch (USB_GET_STATE(xfer)) {
    case USB_ST_TRANSFERRED:
        data = STAILQ_FIRST(&uc->uc_rx_active);
        if (data == NULL)
            goto tr_setup;
        STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
        m = rtwn_report_intr(uc, xfer, data);
        STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
        /* FALLTHROUGH */
    case USB_ST_SETUP:
tr_setup:
        data = STAILQ_FIRST(&uc->uc_rx_inactive);
        if (data == NULL) {
            KASSERT(m == NULL, ("mbuf isn't NULL"));
            goto finish;
        }
        STAILQ_REMOVE_HEAD(&uc->uc_rx_inactive, next);
        STAILQ_INSERT_TAIL(&uc->uc_rx_active, data, next);
        usbd_xfer_set_frame_data(xfer, 0, data->buf,
            usbd_xfer_max_len(xfer));
        usbd_transfer_submit(xfer);

        /*
         * To avoid a lock-order reversal we unlock our private mutex
         * before calling ieee80211_input(); we are at the end of the
         * USB callback here, where it is safe to unlock.
         */
        while (m != NULL) {
            next = m->m_next;
            m->m_next = NULL;

            ni = rtwn_rx_frame(sc, m, &rssi);

            RTWN_UNLOCK(sc);
            nf = RTWN_NOISE_FLOOR;
            if (ni != NULL) {
                if (ni->ni_flags & IEEE80211_NODE_HT)
                    m->m_flags |= M_AMPDU;
                (void)ieee80211_input(ni, m, rssi - nf, nf);
                ieee80211_free_node(ni);
            } else {
                (void)ieee80211_input_all(ic, m, rssi - nf, nf);
            }
            RTWN_LOCK(sc);
            m = next;
        }
        break;
    default:
        /* Return the buffer to the inactive queue due to an error. */
        data = STAILQ_FIRST(&uc->uc_rx_active);
        if (data != NULL) {
            STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
            STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
        }
        if (error != USB_ERR_CANCELLED) {
            usbd_xfer_set_stall(xfer);
            counter_u64_add(ic->ic_ierrors, 1);
            goto tr_setup;
        }
        break;
    }
finish:
    /* Finished receive; age anything left on the FF queue by a little bump. */
    /*
     * XXX TODO: just make this a callout timer schedule so we can
     * flush the FF staging queue if we're approaching idle.
     */
#ifdef IEEE80211_SUPPORT_SUPERG
    if (!(sc->sc_flags & RTWN_FW_LOADED) ||
        sc->sc_ratectl != RTWN_RATECTL_NET80211)
        rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif

    /* Kick-start more transmit in case we stalled. */
    rtwn_start(sc);
}
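
/*
 * A minimal sketch of the lock-juggling pattern rtwn_bulk_rx_callback()
 * uses: frames are decapsulated under the driver lock, but the lock is
 * dropped around the net80211 input call so the stack can take its own
 * locks without a lock-order reversal against this driver.  All names
 * (drv_softc, drv_lock, drv_input, rx_deliver_chain) are hypothetical
 * stand-ins, not the driver's API.
 */
struct drv_softc { int dummy; };        /* opaque in the real driver */
struct pkt { struct pkt *next; };       /* stand-in for struct mbuf */

static void drv_lock(struct drv_softc *sc)   { (void)sc; }
static void drv_unlock(struct drv_softc *sc) { (void)sc; }
static void drv_input(struct pkt *m)         { (void)m; } /* ieee80211_input() stand-in */

/* Caller holds the driver lock, exactly as the USB callback does. */
static void rx_deliver_chain(struct drv_softc *sc, struct pkt *m)
{
    struct pkt *next;

    while (m != NULL) {
        next = m->next;
        m->next = NULL;         /* detach before releasing the lock */

        drv_unlock(sc);         /* RTWN_UNLOCK(sc) in the original */
        drv_input(m);           /* may take net80211 locks */
        drv_lock(sc);           /* reacquire before touching sc again */

        m = next;
    }
}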

static void arn_rx_handler(struct arn_softc *sc)
{
#define PA2DESC(_sc, _pa) \
    ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
    ((_pa) - (_sc)->sc_desc_dma.cookie.dmac_address)))

    ieee80211com_t *ic = (ieee80211com_t *)sc;
    struct ath_buf *bf;
    struct ath_hal *ah = sc->sc_ah;
    struct ath_desc *ds;
    struct ath_rx_status *rs;
    mblk_t *rx_mp;
    struct ieee80211_frame *wh;
    int32_t len, ngood, loop = 1;
    uint8_t phyerr;
    int status;
    struct ieee80211_node *in;

    ngood = 0;
    do {
        mutex_enter(&sc->sc_rxbuflock);
        bf = list_head(&sc->sc_rxbuf_list);
        if (bf == NULL) {
            ARN_DBG((ARN_DBG_RECV, "arn: arn_rx_handler(): "
                "no buffer\n"));
            mutex_exit(&sc->sc_rxbuflock);
            break;
        }
        ASSERT(bf->bf_dma.cookie.dmac_address != NULL);
        ds = bf->bf_desc;
        if (ds->ds_link == bf->bf_daddr) {
            /*
             * Never process the self-linked entry at the end;
             * this can be encountered under heavy load.
             */
            mutex_exit(&sc->sc_rxbuflock);
            break;
        }

        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on.  All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        status = ath9k_hw_rxprocdesc(ah, ds, bf->bf_daddr,
            PA2DESC(sc, ds->ds_link), 0);
        if (status == EINPROGRESS) {
            mutex_exit(&sc->sc_rxbuflock);
            break;
        }
        list_remove(&sc->sc_rxbuf_list, bf);
        mutex_exit(&sc->sc_rxbuflock);

        rs = &ds->ds_rxstat;
        if (rs->rs_status != 0) {
            if (rs->rs_status & ATH9K_RXERR_CRC)
                sc->sc_stats.ast_rx_crcerr++;
            if (rs->rs_status & ATH9K_RXERR_FIFO)
                sc->sc_stats.ast_rx_fifoerr++;
            if (rs->rs_status & ATH9K_RXERR_DECRYPT)
                sc->sc_stats.ast_rx_badcrypt++;
            if (rs->rs_status & ATH9K_RXERR_PHY) {
                sc->sc_stats.ast_rx_phyerr++;
                phyerr = rs->rs_phyerr & 0x1f;
                sc->sc_stats.ast_rx_phy[phyerr]++;
            }
            goto rx_next;
        }

        len = rs->rs_datalen;
        /* less than sizeof(struct ieee80211_frame) */
        if (len < 20) {
            sc->sc_stats.ast_rx_tooshort++;
            goto rx_next;
        }

        if ((rx_mp = allocb(sc->sc_dmabuf_size, BPRI_MED)) == NULL) {
            arn_problem("arn: arn_rx_handler(): "
                "allocating mblk buffer failed.\n");
            return;
        }

        ARN_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU);

        bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len);
        rx_mp->b_wptr += len;
        wh = (struct ieee80211_frame *)rx_mp->b_rptr;
        if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
            IEEE80211_FC0_TYPE_CTL) {
            /*
             * Ignore control frames received in promisc mode.
             */
            freemsg(rx_mp);
            goto rx_next;
        }
        /* Remove the CRC at the end of the IEEE 802.11 frame */
        rx_mp->b_wptr -= IEEE80211_CRC_LEN;
#ifdef DEBUG
        arn_printrxbuf(bf, status == 0);
#endif
        /*
         * Locate the node for the sender, track state, and then
         * pass the (referenced) node up to the 802.11 layer
         * for its use.
         */
        in = ieee80211_find_rxnode(ic, wh);

        /*
         * Send the frame to net80211 for processing.
         */
        (void) ieee80211_input(ic, rx_mp, in, rs->rs_rssi,
            rs->rs_tstamp);

        /* release node */
        ieee80211_free_node(in);

        /*
         * Arrange to update the last rx timestamp only for
         * frames from our ap when operating in station mode.
         * This assumes the rx key is always setup when associated.
         */
        if (ic->ic_opmode == IEEE80211_M_STA &&
            rs->rs_keyix != ATH9K_RXKEYIX_INVALID) {
            ngood++;
        }

        /*
         * Change the default rx antenna if rx diversity chooses the
         * other antenna 3 times in a row.
         */
        if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
            if (++sc->sc_rxotherant >= 3) {
                ath9k_hw_setantenna(sc->sc_ah,
                    ds->ds_rxstat.rs_antenna);
                sc->sc_defant = ds->ds_rxstat.rs_antenna;
                sc->sc_rxotherant = 0;
            }
        } else {
            sc->sc_rxotherant = 0;
        }

rx_next:
        mutex_enter(&sc->sc_rxbuflock);
        list_insert_tail(&sc->sc_rxbuf_list, bf);
        mutex_exit(&sc->sc_rxbuflock);
        arn_rx_buf_link(sc, bf);
    } while (loop);

    if (ngood)
        sc->sc_lastrx = ath9k_hw_gettsf64(ah);

#undef PA2DESC
}
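
/*
 * A standalone sketch of the diversity hysteresis at the end of
 * arn_rx_handler(): switch the default antenna only after the rx path
 * disagrees with it three times in a row, so one stray frame cannot
 * flip it.  ant_state and set_antenna() are hypothetical stand-ins
 * (the driver programs hardware via ath9k_hw_setantenna()).
 */
struct ant_state {
    unsigned char defant;       /* current default antenna */
    int otherant;               /* consecutive frames on the other one */
};

static void set_antenna(unsigned char ant) { (void)ant; }   /* hw stub */

static void ant_diversity_update(struct ant_state *st, unsigned char rx_ant)
{
    if (st->defant != rx_ant) {
        if (++st->otherant >= 3) {
            set_antenna(rx_ant);
            st->defant = rx_ant;
            st->otherant = 0;
        }
    } else {
        st->otherant = 0;       /* streak broken; start counting over */
    }
}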

void an_rxeof(struct an_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ifnet *ifp = &ic->ic_if;
    struct ieee80211_frame *wh;
    struct ieee80211_rxinfo rxi;
    struct ieee80211_node *ni;
    struct an_rxframe frmhdr;
    struct mbuf *m;
    u_int16_t status;
    int fid, gaplen, len, off;
    uint8_t *gap;

    fid = CSR_READ_2(sc, AN_RX_FID);

    /* First read in the frame header */
    if (an_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr),
        sizeof(frmhdr)) != 0) {
        CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
        ifp->if_ierrors++;
        DPRINTF(("an_rxeof: read fid %x failed\n", fid));
        return;
    }
    an_swap16((u_int16_t *)&frmhdr.an_whdr,
        sizeof(struct ieee80211_frame) / 2);

    status = frmhdr.an_rx_status;
    if ((status & AN_STAT_ERRSTAT) != 0 &&
        ic->ic_opmode != IEEE80211_M_MONITOR) {
        CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
        ifp->if_ierrors++;
        DPRINTF(("an_rxeof: fid %x status %x\n", fid, status));
        return;
    }

    /* the payload length field includes a 16-bit "mystery field" */
    len = frmhdr.an_rx_payload_len - sizeof(uint16_t);
    off = ALIGN(sizeof(struct ieee80211_frame));

    if (off + len > MCLBYTES) {
        if (ic->ic_opmode != IEEE80211_M_MONITOR) {
            CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
            ifp->if_ierrors++;
            DPRINTF(("an_rxeof: oversized packet %d\n", len));
            return;
        }
        len = 0;
    }

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL) {
        CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
        ifp->if_ierrors++;
        DPRINTF(("an_rxeof: MGET failed\n"));
        return;
    }
    if (off + len + AN_GAPLEN_MAX > MHLEN) {
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
            m_freem(m);
            ifp->if_ierrors++;
            DPRINTF(("an_rxeof: MCLGET failed\n"));
            return;
        }
    }
    m->m_data += off - sizeof(struct ieee80211_frame);

    if (ic->ic_opmode != IEEE80211_M_MONITOR) {
        gaplen = frmhdr.an_gaplen;
        if (gaplen > AN_GAPLEN_MAX) {
            CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);
            m_freem(m);
            ifp->if_ierrors++;
            DPRINTF(("%s: gap too long\n", __func__));
            return;
        }
        /*
         * We don't need the 16-bit mystery field (payload length?),
         * so read it into the region reserved for the 802.11 header.
         *
         * When Cisco Aironet 350 cards w/ firmware version 5 or
         * greater operate with certain Cisco 350 APs,
         * the "gap" is filled with the SNAP header.  Read
         * it in after the 802.11 header.
         */
        gap = m->m_data + sizeof(struct ieee80211_frame) -
            sizeof(uint16_t);
        an_read_bap(sc, fid, -1, gap, gaplen + sizeof(u_int16_t),
            gaplen + sizeof(u_int16_t));
    } else
        gaplen = 0;

    an_read_bap(sc, fid, -1,
        m->m_data + sizeof(struct ieee80211_frame) + gaplen, len, len);
    an_swap16((u_int16_t *)(m->m_data +
        sizeof(struct ieee80211_frame) + gaplen), (len + 1) / 2);
    m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame) +
        gaplen + len;

    memcpy(m->m_data, &frmhdr.an_whdr, sizeof(struct ieee80211_frame));

    CSR_WRITE_2(sc, AN_EVENT_ACK, AN_EV_RX);

#if NBPFILTER > 0
    if (sc->sc_drvbpf) {
        struct mbuf mb;
        struct an_rx_radiotap_header *tap = &sc->sc_rxtap;

        tap->ar_rate = frmhdr.an_rx_rate;
        tap->ar_antsignal = frmhdr.an_rx_signal_strength;
        tap->ar_chan_freq = ic->ic_bss->ni_chan->ic_freq;
        tap->ar_chan_flags = ic->ic_bss->ni_chan->ic_flags;

        mb.m_data = (caddr_t)tap;
        mb.m_len = sizeof(sc->sc_rxtapu);
        mb.m_next = m;
        mb.m_nextpkt = NULL;
        mb.m_type = 0;
        mb.m_flags = 0;
        bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
    }
#endif /* NBPFILTER > 0 */

    wh = mtod(m, struct ieee80211_frame *);
    rxi.rxi_flags = 0;
    if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
        /*
         * WEP is decrypted by hardware.  Clear the WEP bit
         * in the header for ieee80211_input().
         */
        wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
        rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
    }
    ni = ieee80211_find_rxnode(ic, wh);
    rxi.rxi_rssi = frmhdr.an_rx_signal_strength;
    rxi.rxi_tstamp = an_switch32(frmhdr.an_rx_time);
    ieee80211_input(ifp, m, ni, &rxi);
    ieee80211_release_node(ic, ni);
}
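
/*
 * A minimal sketch of what a helper like an_swap16() presumably does:
 * the Aironet BAP returns buffer data as a stream of 16-bit words in
 * card (little-endian) order, so each word may need byte-swapping in
 * place; the count is in 16-bit words, matching the (len + 1) / 2 and
 * sizeof(...) / 2 arguments above.  This sketch swaps unconditionally
 * to show the mechanics; the real routine would be a no-op on a
 * little-endian host.  sketch_swap16 is a hypothetical name.
 */
#include <stdint.h>
#include <stddef.h>

static void sketch_swap16(uint16_t *p, size_t nwords)
{
    while (nwords-- > 0) {
        *p = (uint16_t)((*p << 8) | (*p >> 8));
        p++;
    }
}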