static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
{
	struct ieee80211_rx_status rx_status;
	struct wl1271_rx_descriptor *desc;
	struct sk_buff *skb;
	u16 *fc;
	u8 *buf;
	u8 beacon = 0;

	skb = dev_alloc_skb(length);
	if (!skb) {
		wl1271_error("Couldn't allocate RX frame");
		return;
	}

	buf = skb_put(skb, length);
	wl1271_spi_reg_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);

	/* the data read starts with the descriptor */
	desc = (struct wl1271_rx_descriptor *) buf;

	/* now we pull the descriptor out of the buffer */
	skb_pull(skb, sizeof(*desc));

	fc = (u16 *)skb->data;
	if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
		beacon = 1;

	wl1271_rx_status(wl, desc, &rx_status, beacon);

	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
		     beacon ? "beacon" : "");

	ieee80211_rx(wl->hw, skb, &rx_status);
}
static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
				  u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt76x0_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information, we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt76x0_rxwi *) data;
	data += sizeof(struct mt76x0_rxwi);
	seg_len -= sizeof(struct mt76x0_rxwi);

	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);

	skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->mt76.hw, skb);
	spin_unlock(&dev->mac_lock);
}
static void ieee80211_tasklet_handler(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *) data;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&local->skb_queue)) ||
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		switch (skb->pkt_type) {
		case IEEE80211_RX_MSG:
			/* Clear skb->pkt_type in order to not confuse kernel
			 * netstack.
			 */
			skb->pkt_type = 0;
			ieee80211_rx(local_to_hw(local), skb);
			break;
		case IEEE80211_TX_STATUS_MSG:
			skb->pkt_type = 0;
			ieee80211_tx_status(local_to_hw(local), skb);
			break;
		default:
			WARN(1, "mac80211: Packet is of unknown type %d\n",
			     skb->pkt_type);
			dev_kfree_skb(skb);
			break;
		}
	}
}
static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
	u32 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;

	/* We received data from the HW, so stop the watchdog */
	if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
		D_DROP("Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (!il3945_mod_params.sw_crypto)
		il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
				      le32_to_cpu(rx_end->status), stats);

	/* If frame is small enough to fit into skb->head, copy it
	 * and do not consume a full page */
	if (len <= SMALL_PACKET_SIZE) {
		memcpy(skb_put(skb, len), rx_hdr->payload, len);
	} else {
		skb_add_rx_frag(skb, 0, rxb->page,
				(void *)rx_hdr->payload - (void *)pkt, len,
				fraglen);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
}
void mt76_rx(struct mt76_dev *dev, struct sk_buff *skb)
{
	if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
		dev_kfree_skb(skb);
		return;
	}

	ieee80211_rx(dev->hw, skb);
}
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	if (is_broadcast_ether_addr(hdr->addr1)) {
		/*TODO*/;
	} else if (is_multicast_ether_addr(hdr->addr1)) {
		/*TODO*/
	} else {
		unicast = true;
	}

	if (!stats.crc) {
		if (unicast) {
			rtlpriv->link_info.num_rx_inperiod++;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);

		if (likely(rtl_action_proc(hw, skb, false)))
			return;
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	_rtl_usb_rx_process_agg(hw, skb);
	ieee80211_rx(hw, skb);
}
static void zd_mac_rx(struct zd_mac *mac, struct sk_buff *skb)
{
	int r;
	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
	struct ieee80211_rx_stats stats;
	const struct rx_status *status;

	if (skb->len < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN +
		       IEEE80211_FCS_LEN + sizeof(struct rx_status)) {
		ieee->stats.rx_errors++;
		ieee->stats.rx_length_errors++;
		goto free_skb;
	}

	r = fill_rx_stats(&stats, &status, mac, skb->data, skb->len);
	if (r) {
		/* Only packets with rx errors are included here.
		 * The error stats have already been set in fill_rx_stats.
		 */
		goto free_skb;
	}

	__skb_pull(skb, ZD_PLCP_HEADER_SIZE);
	__skb_trim(skb, skb->len -
			(IEEE80211_FCS_LEN + sizeof(struct rx_status)));

	update_qual_rssi(mac, skb->data, skb->len, stats.signal,
			 status->signal_strength);

	r = filter_rx(ieee, skb->data, skb->len, &stats);
	if (r <= 0) {
		if (r < 0) {
			ieee->stats.rx_errors++;
			dev_dbg_f(zd_mac_dev(mac), "Error in packet.\n");
		}
		goto free_skb;
	}

	if (ieee->iw_mode == IW_MODE_MONITOR)
		fill_rt_header(skb_push(skb, sizeof(struct zd_rt_hdr)), mac,
			       &stats, status);

	r = ieee80211_rx(ieee, skb, &stats);
	if (r)
		return;
free_skb:
	/* We are always in a soft irq. */
	dev_kfree_skb(skb);
}
/*
 * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
 *
 * Adds the rxb to a new skb and gives it to mac80211
 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct ieee80211_hdr *hdr, u16 len,
					    u32 ampdu_status,
					    struct iwl_rx_cmd_buffer *rxb,
					    struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	unsigned int hdrlen, fraglen;

	/* Don't use dev_alloc_skb(); we'll have enough headroom once
	 * the ieee80211_hdr is pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
	 * are more efficient.
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);

	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		int offset = (void *)hdr + hdrlen -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(mvm->hw, skb);
}
/*
 * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
 *
 * Adds the rxb to a new skb and gives it to mac80211
 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct sk_buff *skb,
					    struct ieee80211_hdr *hdr, u16 len,
					    u32 ampdu_status, u8 crypt_len,
					    struct iwl_rx_cmd_buffer *rxb)
{
	unsigned int hdrlen, fraglen;

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len :
					      sizeof(*hdr) + crypt_len + 8;

	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		int offset = (void *)hdr + hdrlen -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	ieee80211_rx(mvm->hw, skb);
}
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}

		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}

		/* static bcn for roaming */
		rtl_beacon_statistic(hw, skb);

		if (likely(rtl_action_proc(hw, skb, false)))
			ieee80211_rx(hw, skb);
		else
			dev_kfree_skb_any(skb);
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx(hw, _skb);
	}
}

#define __RX_SKB_MAX_QUEUED	64

static void _rtl_rx_work(unsigned long param)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)param;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
		if (unlikely(IS_USB_STOP(rtlusb))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			_rtl_usb_rx_process_noagg(hw, skb);
		} else {
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
	}
}

static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
					unsigned int len)
{
#if NET_IP_ALIGN != 0
	unsigned int padding = 0;
#endif

	/* make function no-op when possible */
	if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
		return 0;

#if NET_IP_ALIGN != 0
	/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
	/* TODO: deduplicate common code, define helper function instead? */

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		padding ^= NET_IP_ALIGN;

		/* Input might be invalid, avoid accessing memory outside
		 * the buffer.
		 */
		if ((unsigned long)qc - (unsigned long)hdr < len &&
		    *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			padding ^= NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		padding ^= NET_IP_ALIGN;

	return padding;
#endif
}

#define __RADIO_TAP_SIZE_RSV	32

static void _rtl_rx_completed(struct urb *_urb)
{
	struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		unsigned int padding;
		struct sk_buff *skb;
		unsigned int qlen;
		unsigned int size = _urb->actual_length;
		struct ieee80211_hdr *hdr;

		if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Too short packet from bulk IN! (len: %d)\n",
				 size);
			goto resubmit;
		}

		qlen = skb_queue_len(&rtlusb->rx_queue);
		if (qlen >= __RX_SKB_MAX_QUEUED) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Pending RX skbuff queue full! (qlen: %d)\n",
				 qlen);
			goto resubmit;
		}

		hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
		padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);

		skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
		if (!skb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Can't allocate skb for bulk IN!\n");
			goto resubmit;
		}

		_rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);

		/* Make sure the payload data is 4 byte aligned. */
		skb_reserve(skb, padding);

		/* reserve some space for mac80211's radiotap */
		skb_reserve(skb, __RADIO_TAP_SIZE_RSV);

		memcpy(skb_put(skb, size), _urb->transfer_buffer, size);

		skb_queue_tail(&rtlusb->rx_queue, skb);
		tasklet_schedule(&rtlusb->rx_work_tasklet);

		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	/* On some architectures, usb_free_coherent must not be called from
	 * hardirq context. Queue urb to cleanup list.
	 */
	usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs);
}

#undef __RADIO_TAP_SIZE_RSV

static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *urb;

	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	tasklet_kill(&rtlusb->rx_work_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);

	skb_queue_purge(&rtlusb->rx_queue);

	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
	}
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (err < 0) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			usb_free_urb(urb);
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err)
			goto err_out;
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	_rtl_usb_cleanup_rx(hw);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);
		/* should after adapter start and interrupt enable. */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		err = _rtl_usb_receive(hw);
	}

	return err;
}
/**
 * ethernic_recv(...): Receives and handles 0x0808 packets arriving from the network.
 */
int ethernic_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pkt, struct net_device *orig_dev)
{
	struct sk_buff *skb_recv = NULL;
	//struct sk_buff *skb_recv_to_ieee80211rx = NULL;
	//struct sk_buff *skb_recv_1 = NULL;
	//struct sk_buff *newskb = NULL;
	//struct sk_buff *_skb = NULL;
	//struct sk_buff *skb_recv_wlannic = NULL;
	struct ethhdr *eh = NULL;
	//static struct net_device *wifi_interf = NULL;
	//struct lvwnet_reg_omni_header *lh_reg_omni = NULL;
	struct lvwnet_peers_info_header *lh_peers = NULL;
	struct lvwnet_only_flag_header *lh_flag = NULL;
	//struct lvwnet_data_header *lh_data = NULL;
	uint8_t *newdata = NULL;
	//uint8_t *_head = NULL;
	//uint8_t *_data = NULL;
	//uint8_t *_tail = NULL;
	//uint8_t *_end = NULL;
	//int hdrlen = -1;
	//struct ieee80211_hdr *hdr80211 = NULL;
	//unsigned int _headlen = 0;
	//unsigned char *data1, *data2;
	int len_data = -1;
	//int len_data_80211 = -1;
	//int len_2 = -1;

	qtd_msg_all++;

	//uint8_t radiotap[26];

	/** lock... have or not have... */
	//spin_lock(&lvwnet_lock);

	if (skb_mac_header_was_set(skb)) {
		eh = eth_hdr(skb);
	} else {
		printk(KERN_ALERT "lvwnet_node: ---- ---- - - skb_mac_header NOT set!\n");
		goto ethernic_recv_out;
	}

	//printk(KERN_INFO "Frame from %pM, to %pM\n", eh->h_source, eh->h_dest);

	//normal node
	if (memcmp(dev->dev_addr, eh->h_dest, ETH_ALEN) != 0) {
		printk(KERN_ALERT "lvwnet_node: frame to other host. Device [%s] is in promiscuous mode? to: %pM, from: %pM.\n",
		       dev->name, eh->h_dest, eh->h_source);
		goto ethernic_recv_out;
	}

	//printk(KERN_ALERT "lvwnet_node: size of skb: %ld / %ld\n", sizeof(skb), sizeof(skb_recv));

	skb_recv = skb_copy(skb, GFP_ATOMIC);
	if (skb_recv == NULL) {
		printk(KERN_ALERT "lvwnet_node: ERR -> skb_recv(2) == NULL\n");
		goto ethernic_recv_out;
	}

	//skb_reset_network_header(skb_recv);

	if (skb_recv->data == NULL) {
		printk(KERN_ALERT "lvwnet_node: received a NULL skb->data. [%s], line %d\n",
		       __func__, __LINE__);
		goto ethernic_recv_out;
	}

	lh_flag = (struct lvwnet_only_flag_header *) skb_recv->data;

	/*************************************************************************/
	if (lh_flag->message_code == LVWNET_CODE_PEER_INFO) {
		qtd_msg_peer_info++;
		printk(KERN_DEBUG "lvwnet_node: received a control frame (0x6) from %pM (Peers information).\n",
		       eh->h_source);

		if (is_controller == 1) {
			printk(KERN_ALERT "lvwnet_node: received info frame (0x6) but is the controller... [%s]: %d\n",
			       __func__, __LINE__);
			goto ethernic_recv_out;
		}

		lh_peers = (struct lvwnet_peers_info_header *) skb_recv->data;
		lh_peers->delay = ntohs(lh_peers->delay);
		lh_peers->power_rx_dbm = ntohs(lh_peers->power_rx_dbm);

		/** TODO: check whether the peer MAC is this node's own MAC and whether
		 * the source is the controller; if so, update the parameters (a
		 * controller-driven centralized update, e.g. to simulate joint
		 * mobility of several peers).
		 */

		//printk(KERN_ALERT "lvwnet_node: %d, %pM\n", lh_peers->message_code, lh_peers->peer_mac);
		peer_received(lh_peers);
		goto ethernic_recv_out;
	}

	/*************************************************************************/
	if (lh_flag->message_code == LVWNET_CODE_REG_OMNI) {
		qtd_msg_reg_omni++;
		printk(KERN_ALERT "lvwnet_node: received a registration frame (0x2) from %pM (Register omni peer).\n",
		       eh->h_source);
		printk(KERN_ALERT "lvwnet_node: received registration frame (0x2) but not controller... [%s]: %d\n",
		       __func__, __LINE__);
		goto ethernic_recv_out;
	}

	/*************************************************************************/
	if (lh_flag->message_code == LVWNET_CODE_DATA) {
		qtd_msg_data++;

		/** TODO: needs this? */
		/*if (wifi_interf == NULL) {
			wifi_interf = find_nic("wlan0");
			if (wifi_interf == NULL) {
				printk(KERN_ALERT "lvwnet_node: wireless interface (%s) not found! [%s:%d]\n",
				       "wlan0", __func__, __LINE__);
				goto ethernic_recv_out;
			}
		}*/
		//skb->dev = wifi_interf;

		if (hw == NULL) {
			printk(KERN_ALERT "lvwnet_node: hw is NULL. Wireless NIC is present? (maybe not SoftMAC compatible...) [%s]\n",
			       __func__);
		} else {
			//skb->dev = wifi_interf;
			//skb_recv_to_ieee80211rx = skb_copy(skb, GFP_ATOMIC);
			newdata = skb_pull(skb_recv, 1);
			//skb_trim(skb_recv, 2);
			skb_reset_network_header(skb_recv);
			len_data = skb_recv->len;

			if (skb_recv->data_len != 0) {
				printk(KERN_DEBUG "lvwnet_node: data_len != 0. Non-linear skb here... [%s:%d]\n",
				       __func__, __LINE__);
			}

			if (skb_is_nonlinear(skb_recv)) {
				printk(KERN_DEBUG "lvwnet_node: skb_is_nonlinear returned true. Non-linear skb here... [%s:%d]\n",
				       __func__, __LINE__);
			}

			if (skb_recv->data == NULL) {
				printk(KERN_DEBUG "lvwnet_node: skb_recv_to_ieee80211rx->data == NULL. bad... [%s:%d]\n",
				       __func__, __LINE__);
				goto ethernic_recv_out;
			}

			//skb_recv_to_ieee80211rx->csum = skb_checksum_complete(skb_recv_to_ieee80211rx);
			//printk(KERN_DEBUG "lvwnet_node: csum ->[%d, %d]\n", skb_recv->csum, skb_checksum_complete(skb_recv));

			//ieee80211_rx_irqsafe(hw, skb_recv);
			ieee80211_rx(hw, skb_recv);
		}
		goto ethernic_recv_out;
	} else {
		printk(KERN_ALERT "lvwnet_node: received an unknown message code [%d]\n",
		       lh_flag->message_code);
	}

ethernic_recv_out:
	dev_kfree_skb(skb);
	//spin_unlock(&lvwnet_lock);
	//dev_kfree_skb(skb_recv_to_ieee80211rx);
	return 1;
}
static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
{
	struct ar9170_rx_head *head;
	struct ar9170_rx_macstatus *mac;
	struct ar9170_rx_phystatus *phy = NULL;
	struct ieee80211_rx_status status;
	struct sk_buff *skb;
	int mpdu_len;
	u8 mac_status;

	if (!IS_STARTED(ar))
		return;

	if (unlikely(len < sizeof(*mac)))
		goto drop;

	mpdu_len = len - sizeof(*mac);

	mac = (void *)(buf + mpdu_len);
	mac_status = mac->status;

	switch (mac_status & AR9170_RX_STATUS_MPDU) {
	case AR9170_RX_STATUS_MPDU_FIRST:
		/* Aggregated MPDUs start with a PLCP header */
		if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
			head = (void *) buf;

			/*
			 * The PLCP header needs to be cached for the
			 * following MIDDLE + LAST A-MPDU packets.
			 *
			 * So, if you are wondering why all frames seem
			 * to share a common RX status information,
			 * then you have the answer right here...
			 */
			memcpy(&ar->rx_plcp, (void *) buf,
			       sizeof(struct ar9170_rx_head));

			mpdu_len -= sizeof(struct ar9170_rx_head);
			buf += sizeof(struct ar9170_rx_head);

			ar->rx_has_plcp = true;
		} else {
			if (net_ratelimit()) {
				wiphy_err(ar->hw->wiphy, "plcp info "
					"is clipped.\n");
			}

			goto drop;
		}
		break;

	case AR9170_RX_STATUS_MPDU_LAST:
		/*
		 * The last frame of an A-MPDU has an extra tail
		 * which does contain the phy status of the whole
		 * aggregate.
		 */
		if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
			mpdu_len -= sizeof(struct ar9170_rx_phystatus);
			phy = (void *)(buf + mpdu_len);
		} else {
			if (net_ratelimit()) {
				wiphy_err(ar->hw->wiphy, "frame tail "
					"is clipped.\n");
			}

			goto drop;
		}
		/* fall through */

	case AR9170_RX_STATUS_MPDU_MIDDLE:
		/* These are just data + mac status */
		if (unlikely(!ar->rx_has_plcp)) {
			if (!net_ratelimit())
				return;

			wiphy_err(ar->hw->wiphy, "rx stream does not start "
				"with a first_mpdu frame tag.\n");

			goto drop;
		}

		head = &ar->rx_plcp;
		break;

	case AR9170_RX_STATUS_MPDU_SINGLE:
		/* single mpdu has both: plcp (head) and phy status (tail) */
		head = (void *) buf;

		mpdu_len -= sizeof(struct ar9170_rx_head);
		mpdu_len -= sizeof(struct ar9170_rx_phystatus);

		buf += sizeof(struct ar9170_rx_head);
		phy = (void *)(buf + mpdu_len);
		break;

	default:
		BUG_ON(1);
		break;
	}

	/* FC + DU + RA + FCS */
	if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
		goto drop;

	memset(&status, 0, sizeof(status));
	if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
		goto drop;

	if (!carl9170_ampdu_check(ar, buf, mac_status))
		goto drop;

	if (phy)
		carl9170_rx_phy_status(ar, phy, &status);

	carl9170_ps_beacon(ar, buf, mpdu_len);

	skb = carl9170_rx_copy_data(buf, mpdu_len);
	if (!skb)
		goto drop;

	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
	ieee80211_rx(ar->hw, skb);
	return;

drop:
	ar->rx_dropped++;
}
void mwl_rx_recv(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
	struct mwl_priv *priv;
	struct mwl_rx_desc *curr_desc;
	int work_done = 0;
	struct sk_buff *prx_skb = NULL;
	int pkt_len;
	struct ieee80211_rx_status status;
	struct mwl_vif *mwl_vif = NULL;
	struct ieee80211_hdr *wh;
	u32 status_mask;

	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!hw);
	priv = hw->priv;
	BUG_ON(!priv);

	curr_desc = priv->desc_data[0].pnext_rx_desc;

	if (curr_desc == NULL) {
		status_mask = readl(priv->iobase1 +
				    MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
		writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
		       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

		priv->is_rx_schedule = false;

		WLDBG_EXIT_INFO(DBG_LEVEL_4, "busy or no receiving packets");
		return;
	}

	while ((curr_desc->rx_control == EAGLE_RXD_CTRL_DMA_OWN) &&
	       (work_done < priv->recv_limit)) {
		prx_skb = curr_desc->psk_buff;
		if (prx_skb == NULL)
			goto out;
		pci_unmap_single(priv->pdev,
				 ENDIAN_SWAP32(curr_desc->pphys_buff_data),
				 priv->desc_data[0].rx_buf_size,
				 PCI_DMA_FROMDEVICE);
		pkt_len = curr_desc->pkt_len;

		if (skb_tailroom(prx_skb) < pkt_len) {
			WLDBG_PRINT("Critical error: not enough tail room =%x pkt_len=%x, curr_desc=%x, curr_desc_data=%x",
				    skb_tailroom(prx_skb), pkt_len,
				    curr_desc, curr_desc->pbuff_data);
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		if (curr_desc->channel != hw->conf.chandef.chan->hw_value) {
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		mwl_rx_prepare_status(curr_desc, &status);
		priv->noise = -curr_desc->noise_floor;

		wh = &((struct mwl_dma_data *)prx_skb->data)->wh;

		if (ieee80211_has_protected(wh->frame_control)) {
			/* Check if hw crypto has been enabled for
			 * this bss. If yes, set the status flags
			 * accordingly
			 */
			if (ieee80211_has_tods(wh->frame_control))
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr1);
			else
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr2);

			if (mwl_vif != NULL &&
			    mwl_vif->is_hw_crypto_enabled) {
				/*
				 * When MMIC ERROR is encountered
				 * by the firmware, payload is
				 * dropped and only 32 bytes of
				 * mwl8k Firmware header is sent
				 * to the host.
				 *
				 * We need to add four bytes of
				 * key information. In it
				 * MAC80211 expects keyidx set to
				 * 0 for triggering Counter
				 * Measure of MMIC failure.
				 */
				if (status.flag & RX_FLAG_MMIC_ERROR) {
					struct mwl_dma_data *tr;

					tr = (struct mwl_dma_data *)prx_skb->data;
					memset((void *)&(tr->data), 0, 4);
					pkt_len += 4;
				}

				if (!ieee80211_is_auth(wh->frame_control))
					status.flag |= RX_FLAG_IV_STRIPPED |
						       RX_FLAG_DECRYPTED |
						       RX_FLAG_MMIC_STRIPPED;
			}
		}

		skb_put(prx_skb, pkt_len);
		mwl_rx_remove_dma_header(prx_skb, curr_desc->qos_ctrl);
		memcpy(IEEE80211_SKB_RXCB(prx_skb), &status, sizeof(status));
		ieee80211_rx(hw, prx_skb);
out:
		mwl_rx_refill(priv, curr_desc);
		curr_desc->rx_control = EAGLE_RXD_CTRL_DRIVER_OWN;
		curr_desc->qos_ctrl = 0;
		curr_desc = curr_desc->pnext;
		work_done++;
	}

	priv->desc_data[0].pnext_rx_desc = curr_desc;

	status_mask = readl(priv->iobase1 +
			    MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
	writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
	       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

	priv->is_rx_schedule = false;
	WLDBG_EXIT(DBG_LEVEL_4);
}
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	int ret = 0;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}

	skb_reserve(skb, IWL_LINK_HDR_MAX);
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	/* mac80211 currently doesn't support paged SKB. Convert it to
	 * a linear SKB for management frames and for data frames that
	 * require software decryption or software defragmentation.
	 */
	if (ieee80211_is_mgmt(fc) ||
	    ieee80211_has_protected(fc) ||
	    ieee80211_has_morefrags(fc) ||
	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
	    (ieee80211_is_data_qos(fc) &&
	     *ieee80211_get_qos_ctl(hdr) &
	     IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
		ret = skb_linearize(skb);
	else
		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
			0 : -ENOMEM;

	if (ret) {
		kfree_skb(skb);
		goto out;
	}

	/*
	 * XXX: We cannot touch the page and its virtual memory (hdr) after
	 * here. It might have already been freed by the above skb change.
	 */

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
out:
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
static void ieee80211p_tasklet_rx(unsigned long data)
{
	/* Driver's private data */
	struct ieee80211p_priv *priv = (void *)data;

	/* RX skb */
	struct sk_buff *skb = priv->rx_skb;

	/* RX status */
	struct ieee80211_rx_status *rxs = NULL;
	struct ieee80211p_rx_status *rs = NULL;

	/* Netlink header */
	struct nlmsghdr *nlh = NULL;

	/* Netlink command */
	char *nlcmd = NULL;

	/* Received data rate index */
	int rate_idx = -1;

	/* lock */
	spin_lock(&priv->rxq_lock);

	/************************
	 * Netlink skb handling *
	 ************************/

	printk(KERN_ERR "ieee80211p_tasklet_rx: receiving data from PHY\n");

	if (skb == NULL) {
		printk(KERN_ERR "ieee80211_tasklet_rx: received skb == NULL\n");
		goto error;
	}

	/* Get the netlink message header */
	nlh = (struct nlmsghdr *)skb->data;

	/* Check the command of the received msg */
	nlcmd = (char *)NLMSG_DATA(nlh);

	if (*nlcmd == NLCMD_INIT) {
		/* Keep track of the softmodem pid */
		priv->pid_softmodem = nlh->nlmsg_pid;
		printk(KERN_ERR "ieee80211_tasklet_rx: NLCMD_INIT received / softmodem pid = %u\n",
		       priv->pid_softmodem);
		dev_kfree_skb_any(skb);
		goto error;
	}

	/* Remove the nlmsg header + netlink command */
	rs = (struct ieee80211p_rx_status *)skb_pull(skb,
			sizeof(struct nlmsghdr) + NLCMD_SIZE);

	if (rs == NULL) {
		printk(KERN_ERR "ieee80211_tasklet_rx: rx status == NULL\n");
		dev_kfree_skb_any(skb);
		goto error;
	}

	/*********
	 * Stats *
	 *********/

	rxs = IEEE80211_SKB_RXCB(skb);

	if (rxs == NULL) {
		printk(KERN_ERR "ieee80211_tasklet_rx: rx status == NULL\n");
		dev_kfree_skb_any(skb);
		goto error;
	}

	/* Keep track of the stats sent by the softmodem */
	rxs->freq = priv->cur_chan->center_freq;
	rxs->signal = rs->rssi;
	rxs->band = rs->band;
	rxs->flag = 0;

	rate_idx = find_rate_idx(priv, rxs->band, rs->rate);
	if (rate_idx == -1) {
		printk(KERN_ERR "ieee80211_tasklet_rx: unknown data rate %u\n",
		       rs->rate);
		dev_kfree_skb_any(skb);
		goto error;
	} else {
		rxs->rate_idx = rate_idx;
	}

	if (rs->flags & IEEE80211P_MMIC_ERROR) {
		rxs->flag |= RX_FLAG_MMIC_ERROR;
	}

	if (rs->flags & IEEE80211P_FAILED_FCS_CRC) {
		rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
	}

	if (rs->flags & IEEE80211P_FAILED_PLCP_CRC) {
		rxs->flag |= RX_FLAG_FAILED_PLCP_CRC;
	}

	if (rs->flags & IEEE80211P_MACTIME_MPDU) {
		rxs->flag |= RX_FLAG_MACTIME_MPDU;
	}

	if (rs->flags & IEEE80211P_NO_SIGNAL_VAL) {
		rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
	}

	/* Remove the rx status from the skb */
	skb_pull(skb, sizeof(struct ieee80211p_rx_status));

	printk(KERN_ERR "ieee80211p_tasklet_rx: sending data to ieee80211\n");

	/* Give skb to the mac80211 subsystem */
	ieee80211_rx(priv->hw, skb);

error:
	/* unlock */
	spin_unlock(&priv->rxq_lock);

} /* ieee80211p_tasklet_rx */