/*
 * Extract packets from the RX queue.
 *
 * Walks the RX descriptor ring starting at priv->rx_curr_desc and hands
 * up to @budget completed frames to the network stack.  Returns the
 * number of descriptors consumed (dropped/recycled frames included),
 * which the caller's poll loop uses for NAPI accounting.
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptor */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		/* descriptor is ours: advance ring state before any
		 * drop/recycle "continue" below */
		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			/* small frame: copy the payload into a fresh skb
			 * and keep the original rx buffer (and its DMA
			 * mapping) in the ring */
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb_ip_align(dev, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			/* sync for CPU before reading, give it back to the
			 * device afterwards so the slot can be reused */
			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			/* large frame: hand the rx buffer itself to the
			 * stack; NULLing the slot tells the refill path to
			 * allocate a replacement */
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	/* refill even when nothing was processed if the ring ran dry,
	 * otherwise rx would stall with no refilled descriptors */
	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}
/*
 * Indicate a received packet to the upper layer.
 *
 * In AP mode, frames destined for associated stations are forwarded
 * straight back out through the xmit path; multicast frames are both
 * forwarded and (via a clone) indicated locally.  Optionally runs the
 * NAT2.5 bridge-extension hook before handing the frame to the stack.
 *
 * @padapter: owning adapter
 * @pkt:      received frame (may be NULL, in which case nothing happens)
 * @pattrib:  parsed rx attributes (dst address, checksum report, ...)
 */
void rtw_os_recv_indicate_pkt(_adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib)
{
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
	struct recv_priv *precvpriv = &(padapter->recvpriv);
#ifdef CONFIG_BR_EXT
	void *br_port = NULL;
#endif
	int ret;

	/* Indicat the packets to upper layer */
	if (pkt) {
		if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
		{
			_pkt *pskb2=NULL;
			struct sta_info *psta = NULL;
			struct sta_priv *pstapriv = &padapter->stapriv;
			int bmcast = IS_MCAST(pattrib->dst);

			//DBG_871X("bmcast=%d\n", bmcast);

			/* destination is not this AP itself: candidate for
			 * intra-BSS forwarding */
			if (_rtw_memcmp(pattrib->dst, adapter_mac_addr(padapter), ETH_ALEN) == _FALSE)
			{
				//DBG_871X("not ap psta=%p, addr=%pM\n", psta, pattrib->dst);

				if(bmcast)
				{
					psta = rtw_get_bcmc_stainfo(padapter);
					/* clone so the frame can be both
					 * forwarded and indicated locally */
					pskb2 = rtw_skb_clone(pkt);
				} else {
					psta = rtw_get_stainfo(pstapriv, pattrib->dst);
				}

				if(psta)
				{
					struct net_device *pnetdev= (struct net_device*)padapter->pnetdev;

					//DBG_871X("directly forwarding to the rtw_xmit_entry\n");

					//skb->ip_summed = CHECKSUM_NONE;
					pkt->dev = pnetdev;

#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
					skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));
#endif //LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35)

					/* forwarding consumes pkt */
					_rtw_xmit_entry(pkt, pnetdev);

					if(bmcast && (pskb2 != NULL) )
					{
						/* continue with the clone so the
						 * multicast frame is also
						 * indicated locally */
						pkt = pskb2;
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_mcast);
					}
					else
					{
						/* unicast forwarded (or clone
						 * failed): nothing left to
						 * indicate */
						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_forward);
						return;
					}
				}
			}
			else// to APself
			{
				//DBG_871X("to APSelf\n");
				DBG_COUNTER(padapter->rx_logs.os_indicate_ap_self);
			}
		}

#ifdef CONFIG_BR_EXT
		// Insert NAT2.5 RX here!
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		br_port = padapter->pnetdev->br_port;
#else // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		rcu_read_lock();
		br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
		rcu_read_unlock();
#endif // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))

		if( br_port && (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) )
		{
			int nat25_handle_frame(_adapter *priv, struct sk_buff *skb);
			if (nat25_handle_frame(padapter, pkt) == -1) {
				//priv->ext_stats.rx_data_drops++;
				//DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n");
				//return FAIL;

#if 1
				// bypass this frame to upper layer!!
#else
				rtw_skb_free(sub_skb);
				continue;
#endif
			}
		}
#endif // CONFIG_BR_EXT

		if( precvpriv->sink_udpport > 0)
			rtw_sink_rtp_seq_dbg(padapter,pkt);

		pkt->protocol = eth_type_trans(pkt, padapter->pnetdev);
		pkt->dev = padapter->pnetdev;

#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
		/* honour the hardware TCP checksum report when present */
		if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) ) {
			pkt->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			pkt->ip_summed = CHECKSUM_NONE;
		}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
		pkt->ip_summed = CHECKSUM_NONE;
#endif //CONFIG_TCP_CSUM_OFFLOAD_RX

		ret = rtw_netif_rx(padapter->pnetdev, pkt);
		if (ret == NET_RX_SUCCESS)
			DBG_COUNTER(padapter->rx_logs.os_netif_ok);
		else
			DBG_COUNTER(padapter->rx_logs.os_netif_err);
	}
}
static u32 nps_enet_rx_handler(struct net_device *ndev) { u32 frame_len, err = 0; u32 work_done = 0; struct nps_enet_priv *priv = netdev_priv(ndev); struct sk_buff *skb; struct nps_enet_rx_ctl rx_ctrl; rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); frame_len = rx_ctrl.nr; /* Check if we got RX */ if (!rx_ctrl.cr) return work_done; /* If we got here there is a work for us */ work_done++; /* Check Rx error */ if (rx_ctrl.er) { ndev->stats.rx_errors++; err = 1; } /* Check Rx CRC error */ if (rx_ctrl.crc) { ndev->stats.rx_crc_errors++; ndev->stats.rx_dropped++; err = 1; } /* Check Frame length Min 64b */ if (unlikely(frame_len < ETH_ZLEN)) { ndev->stats.rx_length_errors++; ndev->stats.rx_dropped++; err = 1; } if (err) goto rx_irq_clean; /* Skb allocation */ skb = netdev_alloc_skb_ip_align(ndev, frame_len); if (unlikely(!skb)) { ndev->stats.rx_errors++; ndev->stats.rx_dropped++; goto rx_irq_clean; } /* Copy frame from Rx fifo into the skb */ nps_enet_read_rx_fifo(ndev, skb->data, frame_len); skb_put(skb, frame_len); skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_UNNECESSARY; ndev->stats.rx_packets++; ndev->stats.rx_bytes += frame_len; netif_receive_skb(skb); goto rx_irq_frame_done; rx_irq_clean: /* Clean Rx fifo */ nps_enet_clean_rx_fifo(ndev, frame_len); rx_irq_frame_done: /* Ack Rx ctrl register */ nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0); return work_done; }
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_device *dev = (struct net_device *)vcc->proto_data; struct sk_buff *new_skb; eg_cache_entry *eg; struct mpoa_client *mpc; __be32 tag; char *tmp; ddprintk("mpoa: (%s) mpc_push:\n", dev->name); if (skb == NULL) { dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name); mpc_vcc_close(vcc, dev); return; } skb->dev = dev; if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) { struct sock *sk = sk_atm(vcc); dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name); /* Pass control packets to daemon */ skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); return; } /* data coming over the shortcut */ atm_return(vcc, skb->truesize); mpc = find_mpc_by_lec(dev); if (mpc == NULL) { printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name); return; } if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */ ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name); } else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */ printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name); printk(" mpc_push: non-tagged data unsupported, purging\n"); dev_kfree_skb_any(skb); return; } else { printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name); dev_kfree_skb_any(skb); return; } tmp = skb->data + sizeof(struct llc_snap_hdr); tag = *(__be32 *)tmp; eg = mpc->eg_ops->get_by_tag(tag, mpc); if (eg == NULL) { printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n", dev->name,tag); purge_egress_shortcut(vcc, NULL); dev_kfree_skb_any(skb); return; } /* * See if ingress MPC is using shortcut we opened as a return channel. * This means we have a bi-directional vcc opened by us. 
*/ if (eg->shortcut == NULL) { eg->shortcut = vcc; printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name); } skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */ new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */ dev_kfree_skb_any(skb); if (new_skb == NULL){ mpc->eg_ops->put(eg); return; } skb_push(new_skb, eg->ctrl_info.DH_length); /* add MAC header */ skb_copy_to_linear_data(new_skb, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length); new_skb->protocol = eth_type_trans(new_skb, dev); skb_reset_network_header(new_skb); eg->latest_ip_addr = ip_hdr(new_skb)->saddr; eg->packets_rcvd++; mpc->eg_ops->put(eg); memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(new_skb); return; }
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* walk descriptors until we hit one still owned by the hardware */
	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		/* interface closed: just recycle the descriptor */
		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		/* NOTE(review): the RX buffer is unmapped/remapped with
		 * FEC_ENET_TX_FRSIZE as its size — looks like it should be
		 * an RX buffer size constant; confirm against the mapping
		 * site in the open/ring-setup code. */
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		/* some SoCs deliver the frame byte-swapped */
		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			/* copy out: the DMA buffer itself stays in the ring */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);
			if (!skb_defer_rx_timestamp(skb))
				netif_rx(skb);
		}

		/* re-map the same buffer for the next frame; this runs even
		 * on the allocation-failure path above, keeping the ring
		 * entry usable */
		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock(&fep->hw_lock);
}
static void usb_net_raw_ip_rx_urb_comp(struct urb *urb) { struct baseband_usb *usb = (struct baseband_usb *) urb->context; int i = usb->baseband_index; struct sk_buff *skb; unsigned char *dst; unsigned char ethernet_header[14] = { /* Destination MAC */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Source MAC */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* EtherType */ NET_IP_ETHERTYPE, }; pr_debug("usb_net_raw_ip_rx_urb_comp { urb %p\n", urb); /* check input */ if (!urb) { pr_err("no urb\n"); return; } switch (urb->status) { case 0: break; case -ENOENT: /* fall through */ case -ESHUTDOWN: /* fall through */ case -EPROTO: pr_info("%s: rx urb %p - link shutdown %d\n", __func__, urb, urb->status); goto err_exit; default: pr_info("%s: rx urb %p - status %d\n", __func__, urb, urb->status); break; } /* put rx urb data in rx buffer */ if (urb->actual_length) { pr_debug("usb_net_raw_ip_rx_urb_comp - " "urb->actual_length %d\n", urb->actual_length); /* allocate skb with space for * - dummy ethernet header * - rx IP packet from modem */ skb = netdev_alloc_skb(usb_net_raw_ip_dev[i], NET_IP_ALIGN + 14 + urb->actual_length); if (skb) { /* generate a dummy ethernet header * since modem sends IP packets without * any ethernet headers */ memcpy(ethernet_header + 0, usb_net_raw_ip_dev[i]->dev_addr, 6); memcpy(ethernet_header + 6, "0x01\0x02\0x03\0x04\0x05\0x06", 6); /* fill skb with * - dummy ethernet header * - rx IP packet from modem */ skb_reserve(skb, NET_IP_ALIGN); dst = skb_put(skb, 14); memcpy(dst, ethernet_header, 14); dst = skb_put(skb, urb->actual_length); memcpy(dst, urb->transfer_buffer, urb->actual_length); skb->protocol = eth_type_trans(skb, usb_net_raw_ip_dev[i]); /* pass skb to network stack */ if (netif_rx(skb) < 0) { pr_err("usb_net_raw_ip_rx_urb_comp_work - " "netif_rx(%p) failed\n", skb); kfree_skb(skb); } } else { pr_err("usb_net_raw_ip_rx_urb_comp_work - " "netdev_alloc_skb() failed\n"); } } /* mark rx urb complete */ usb->usb.rx_urb = (struct urb *) 0; /* submit next rx 
urb */ usb_net_raw_ip_rx_urb_submit(usb); return; err_exit: /* mark rx urb complete */ usb->usb.rx_urb = (struct urb *) 0; pr_debug("usb_net_raw_ip_rx_urb_comp }\n"); return; }
irqreturn_t nic_8019_rx(int irq, void *dev_id, struct pt_regs *regs) { u8 RxPageBeg, RxPageEnd; u8 RxNextPage; u8 RxStatus; #ifdef RTL8019_OP_16 u16 *data,temp; #else u8 *data; #endif u16 i, RxLength,RxLen; struct sk_buff *skb; struct net_device *dev = (struct net_device *) dev_id; struct nic_8019_priv *priv = (struct nic_8019_priv *) dev->priv; TRACE("TX/RX Interupt!\n"); spin_lock(&priv->lock); SetRegPage(0); outportb(BNRY, rBNRY); //??? RxStatus = inportb(ISR); if (RxStatus & 2) { outportb(ISR, 0x2); //clr TX interupt priv->stats.tx_packets++; TRACE("transmit one packet complete!\n"); } if (RxStatus & 1) { readpacket: TRACE("Receivex packet....\n"); outportb(ISR, 0x1); //clr Rx interupt SetRegPage(1); RxPageEnd = inportb(CURR); SetRegPage(0); RxPageBeg = rBNRY+1; if(RxPageBeg>=RPSTOP) RxPageBeg = RPSTART; outportb(BaseAddr, 0x22); // stop remote dma //outport(RSAR0, RxPageBeg<<8); //outport(RBCR0, 256); outportb(RSAR0, 0); outportb(RSAR1, RxPageBeg); outportb(RBCR0, 4); outportb(RBCR1, 0); outportb(BaseAddr, 0xa); #ifdef RTL8019_OP_16 temp = inportw(RWPORT); RxNextPage = temp>>8; RxStatus = temp&0xff; RxLength = inportw(RWPORT); #else RxStatus = inportb(RWPORT); RxNextPage = inportb(RWPORT); RxLength = inportb(RWPORT); RxLength |= inportb(RWPORT)<<8; #endif TRACE("\nRxBeg = %x, RxEnd = %x, nextpage = %x, size = %i\n", RxPageBeg, RxPageEnd, RxNextPage, RxLength); RxLength -= 4; if (RxLength>ETH_FRAME_LEN) { if (RxPageEnd==RPSTART) rBNRY = RPSTOP-1; else rBNRY = RxPageEnd-1; outportb(BNRY, rBNRY); TRACE("RxLength more long than %x\n", ETH_FRAME_LEN); return IRQ_HANDLED; } skb = dev_alloc_skb(RxLength+2); if (!skb) { TRACE("Rtl8019as eth: low on mem - packet dropped\n"); priv->stats.rx_dropped++; return IRQ_HANDLED; } skb->dev = dev; skb_reserve(skb, 2); skb_put(skb, RxLength); #ifdef RTL8019_OP_16 data = ( u16 *)skb->data; #else data = ( u8 *)skb->data; #endif // eth_copy_and_sum(skb, data, len, 0); outportb(RSAR0, 4); outportb(RSAR1, RxPageBeg); outportb(RBCR0, 
RxLength); outportb(RBCR1, RxLength>>8); outportb(BaseAddr, 0xa); #ifdef RTL8019_OP_16 i = 2; data -= 2; RxLen=(RxLength+1)/2; #else i = 4; data -= 4; RxLen=RxLength; #endif for(; RxLen--;) { #ifdef RTL8019_OP_16 static const int cmp_val = 0x7f; #else static const int cmp_val = 0xff; #endif if (!(i & cmp_val)) { outportb(BNRY, RxPageBeg); RxPageBeg++; if(RxPageBeg>=RPSTOP) RxPageBeg = RPSTART; } #ifdef RTL8019_OP_16 data[i++] = inportw(RWPORT); TRACE("%2X,%2X,", data[i-1]&0xff,data[i-1]>>8); #else data[i++] = inportb(RWPORT); TRACE("%2X,", data[i-1]); #endif } TRACE("\n"); outportb(BNRY, RxPageBeg); rBNRY = RxPageBeg; skb->protocol = eth_type_trans(skb, dev); TRACE("\nprotocol=%x\n", skb->protocol); priv->stats.rx_packets++; priv->stats.rx_bytes +=RxLength; netif_rx(skb); /* Process all unread data */ if (RxPageEnd != RxNextPage) goto readpacket; } else {
/*
 * Generic IP tunnel receive path.
 *
 * Strips the tunnel header, validates checksum/sequence expectations
 * against the tunnel's configured i_flags, decapsulates ECN, updates
 * per-cpu stats and hands the inner packet to GRO.  Always returns 0;
 * on any failure the skb is freed and the relevant error counters are
 * bumped.
 */
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_tstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	secpath_reset(skb);

	skb->protocol = tpi->proto;

	/* pull the tunnel header; iph keeps pointing at the OUTER header */
	skb->mac_header = skb->network_header;
	__pskb_pull(skb, tunnel->hlen);
	skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen);
#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	/* checksum presence must match the tunnel's configuration in
	 * both directions */
	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	/* enforce in-order delivery when sequencing is enabled; the
	 * (s32) cast handles sequence-number wraparound */
	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		/* re-read iph: pskb_may_pull may have moved skb data */
		iph = ip_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	skb->pkt_type = PACKET_HOST;
	__skb_tunnel_rx(skb, tunnel->dev);

	skb_reset_network_header(skb);
	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		/* err > 1 means the ECN combination is invalid: drop */
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, const struct tnl_ptk_info *tpi, bool log_ecn_error) { struct pcpu_sw_netstats *tstats; const struct iphdr *iph = ip_hdr(skb); int err; #ifdef CONFIG_NET_IPGRE_BROADCAST if (ipv4_is_multicast(iph->daddr)) { tunnel->dev->stats.multicast++; skb->pkt_type = PACKET_BROADCAST; } #endif if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) || ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) { tunnel->dev->stats.rx_crc_errors++; tunnel->dev->stats.rx_errors++; goto drop; } if (tunnel->parms.i_flags&TUNNEL_SEQ) { if (!(tpi->flags&TUNNEL_SEQ) || (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) { tunnel->dev->stats.rx_fifo_errors++; tunnel->dev->stats.rx_errors++; goto drop; } tunnel->i_seqno = ntohl(tpi->seq) + 1; } skb_reset_network_header(skb); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { if (log_ecn_error) net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", &iph->saddr, iph->tos); if (err > 1) { ++tunnel->dev->stats.rx_frame_errors; ++tunnel->dev->stats.rx_errors; goto drop; } } tstats = this_cpu_ptr(tunnel->dev->tstats); u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev))); if (tunnel->dev->type == ARPHRD_ETHER) { skb->protocol = eth_type_trans(skb, tunnel->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); } else { skb->dev = tunnel->dev; } gro_cells_receive(&tunnel->gro_cells, skb); return 0; drop: kfree_skb(skb); return 0; }
UINT32 (*RALINK_FP_Handle)(PNDIS_PACKET pPacket); EXPORT_SYMBOL(RALINK_FP_Handle); packet_forward() { UINT32 HandRst = 1; ...... if (RALINK_FP_Handle != NULL) HandRst = RALINK_FP_Handle(skb); if (HandRst != 0) { /* pass the packet to upper layer */ skb->protocol = eth_type_trans(skb, skb->dev); netif_rx(skb); } } */ UINT32 BG_FTPH_PacketFromApHandle( IN PNDIS_PACKET pPacket); #ifdef BG_FT_OPEN_SUPPORT extern UINT32 (*RALINK_FP_Handle)(PNDIS_PACKET pPacket); #else UINT32 (*RALINK_FP_Handle)(PNDIS_PACKET pPacket); #endif /* BG_FT_OPEN_SUPPORT */
/*
 * Drain completed RX descriptors from @ring, up to @weight frames.
 *
 * The hardware status register gives the slot the DMA engine will fill
 * next; every slot between ring->start and that point holds a finished
 * frame.  Each good frame gets its buffer replaced, is stripped of the
 * driver rx header and FCS, and is passed to the stack.  Returns the
 * number of frames delivered.
 */
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	/* convert the hardware descriptor-pointer status into a slot index */
	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		/* do {} while (0) gives the error paths a "break" that
		 * still advances ring->start below */
		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				/* return the untouched buffer to the device */
				dma_sync_single_for_device(dma_dev, slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}

			/* Omit CRC.
			 * NOTE(review): len comes straight from hardware and
			 * is not range-checked against BGMAC_RX_BUF_SIZE
			 * before skb_put() below — confirm whether the
			 * upstream length-validation fix is needed here. */
			len -= ETH_FCS_LEN;

			/* Prepare new skb as replacement */
			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
			if (err) {
				/* Poison the old skb so a later pass drops it
				 * instead of delivering stale data */
				rx->len = cpu_to_le16(0xdead);
				rx->flags = cpu_to_le16(0xbeef);

				dma_sync_single_for_device(dma_dev, slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap old skb, we'll pass it to the netfif */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* strip the driver rx header, keep only the frame */
			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			netif_receive_skb(skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}
/* ---------------------------------------------------------------------------- mace_rx Receives packets. ---------------------------------------------------------------------------- */ static int mace_rx(struct net_device *dev, unsigned char RxCnt) { mace_private *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned char rx_framecnt; unsigned short rx_status; while ( ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) && (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */ (RxCnt--) ) { rx_status = inw(ioaddr + AM2150_RCV); pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status" " 0x%X.\n", dev->name, rx_framecnt, rx_status); if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */ lp->linux_stats.rx_errors++; if (rx_status & MACE_RCVFS_OFLO) { lp->mace_stats.oflo++; } if (rx_status & MACE_RCVFS_CLSN) { lp->mace_stats.clsn++; } if (rx_status & MACE_RCVFS_FRAM) { lp->mace_stats.fram++; } if (rx_status & MACE_RCVFS_FCS) { lp->mace_stats.fcs++; } } else { short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4; /* Auto Strip is off, always subtract 4 */ struct sk_buff *skb; lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV); /* runt packet count */ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); /* rcv collision count */ pr_debug(" receiving packet size 0x%X rx_status" " 0x%X.\n", pkt_len, rx_status); skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb != NULL) { skb_reserve(skb, 2); insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); if (pkt_len & 1) *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ lp->linux_stats.rx_packets++; lp->linux_stats.rx_bytes += pkt_len; outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ continue; } else { pr_debug("%s: couldn't allocate a sk_buff of size" " %d.\n", dev->name, pkt_len); lp->linux_stats.rx_dropped++; } }
/*
 * rtw_recv_indicatepkt - hand a completed recv_frame's skb up to the kernel.
 *
 * Repairs the skb's data/tail/len from the recv_frame bookkeeping, performs
 * AP-mode intra-BSS forwarding, sets the checksum state, and indicates the
 * packet with netif_rx().  The recv_frame wrapper is always returned to
 * pfree_recv_queue before this function exits.
 */
void rtw_recv_indicatepkt(_adapter *padapter, union recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	_queue	*pfree_recv_queue;
	_pkt *skb;
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
#ifdef CONFIG_RTL8712_TCP_CSUM_OFFLOAD_RX
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
#endif

_func_enter_;

	precvpriv = &(padapter->recvpriv);
	pfree_recv_queue = &(precvpriv->free_recv_queue);

#ifdef CONFIG_DRVEXT_MODULE
	/* Optional hook: if the drvext module consumes the frame, just
	 * recycle the wrapper and stop. */
	if (drvext_rx_handler(padapter, precv_frame->u.hdr.rx_data, precv_frame->u.hdr.len) == _SUCCESS)
	{
		rtw_free_recvframe(precv_frame, pfree_recv_queue);
		return;
	}
#endif

	skb = precv_frame->u.hdr.pkt;
	if(skb == NULL)
	{
		RT_TRACE(_module_recv_osdep_c_,_drv_err_,("rtw_recv_indicatepkt():skb==NULL something wrong!!!!\n"));
		goto _recv_indicatepkt_drop;
	}

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():skb != NULL !!!\n"));
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head=%p  precv_frame->hdr.rx_data=%p\n", precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("precv_frame->hdr.rx_tail=%p precv_frame->u.hdr.rx_end=%p precv_frame->hdr.len=%d \n", precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end, precv_frame->u.hdr.len));

	/* Re-point the skb at the payload window the recv path carved out. */
	skb->data = precv_frame->u.hdr.rx_data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
#else
	skb->tail = precv_frame->u.hdr.rx_tail;
#endif
	skb->len = precv_frame->u.hdr.len;

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n skb->head=%p skb->data=%p skb->tail=%p skb->end=%p skb->len=%d\n", skb->head, skb->data, skb->tail, skb->end, skb->len));

	if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
	{
		/* AP mode: frames addressed to another station in the BSS are
		 * forwarded straight back out through the xmit entry point. */
		_pkt *pskb2=NULL;
		struct sta_info *psta = NULL;
		struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
		int bmcast = IS_MCAST(pattrib->dst);

		//DBG_871X("bmcast=%d\n", bmcast);

		if(_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)==_FALSE)
		{
			psta = rtw_get_stainfo(pstapriv, pattrib->dst);

			//DBG_871X("not ap psta=%p, addr=%pM\n", psta, pattrib->dst);

			if(bmcast)
			{
				/* Broadcast: clone so it can be both forwarded
				 * (original) and indicated locally (clone). */
				pskb2 = skb_clone(skb, GFP_ATOMIC);
			}

			if(psta)
			{
				//DBG_871X("directly forwarding to the xmit_entry\n");

				//skb->ip_summed = CHECKSUM_NONE;
				//skb->protocol = eth_type_trans(skb, pnetdev);
				skb->dev = padapter->pnetdev;
				/* rtw_xmit_entry() consumes skb. */
				rtw_xmit_entry(skb, padapter->pnetdev);

				if(bmcast == _FALSE)
					goto _recv_indicatepkt_end;
			}

			/* NOTE(review): if skb_clone() failed above, pskb2 is
			 * NULL here and netif_rx() below would be handed a NULL
			 * skb; conversely if psta was NULL the clone is never
			 * consumed.  Both paths deserve a guard -- confirm. */
			if(bmcast)
				skb = pskb2;
		}
		else// to APself
		{
			//DBG_871X("to APSelf\n");
		}
	}

#ifdef CONFIG_RTL8712_TCP_CSUM_OFFLOAD_RX
	/* Trust the hardware TCP checksum report only when it was valid. */
	if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) )
	{
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		//printk("CHECKSUM_UNNECESSARY \n");
	}
	else
	{
		skb->ip_summed = CHECKSUM_NONE;
		//printk("CHECKSUM_NONE(%d, %d) \n", pattrib->tcpchk_valid, pattrib->tcp_chkrpt);
	}
#else /* !CONFIG_RTL8712_TCP_CSUM_OFFLOAD_RX */
	skb->ip_summed = CHECKSUM_NONE;
#endif

	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);

	netif_rx(skb);

_recv_indicatepkt_end:

	precv_frame->u.hdr.pkt = NULL; // pointers to NULL before rtw_free_recvframe()

	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));

_func_exit_;

	return;

_recv_indicatepkt_drop:

	//enqueue back to free_recv_queue
	if(precv_frame)
		rtw_free_recvframe(precv_frame, pfree_recv_queue);

	precvpriv->rx_drop++;

_func_exit_;

}
/*
 * We have a good packet(s), get it/them out of the buffers.
 *
 * cgg - this driver works by creating (once) a circular list of receiver
 *       DMA descriptors that will be used serially by the Banyan.
 *       Because the descriptors are never unlinked from the list _they
 *       are always live_.  We are counting on Linux (and the chosen number
 *       of buffers) to keep ahead of the hardware otherwise the same
 *       descriptor might be used for more than one reception.
 */
static void acacia_rx(struct net_device *dev)
{
	struct acacia_local* lp = (struct acacia_local *)dev->priv;
	volatile DMAD_t rd = &lp->rd_ring[lp->rx_next_out];
	struct sk_buff *skb;
	u8* pkt_buf;
	u32 devcs;
	u32 count, pkt_len;

	/* cgg - keep going while we have received into more descriptors */
	while (IS_DMA_USED(rd->control)) {
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		/* Receive buffers are a fixed-stride slab indexed by the
		 * descriptor number. */
		pkt_buf = &lp->rba[lp->rx_next_out * ACACIA_RBSIZE];

		/*
		 * cgg - RESET the address pointer later - if we get a second
		 * reception it will occur in the remains of the current
		 * area of memory - protected by the diminished DMA count.
		 */

		/*
		 * Due to a bug in banyan processor, the packet length
		 * given by devcs field and count field sometimes differ.
		 * If that is the case, report Error.
		 */
		count = ACACIA_RBSIZE - (u32)DMA_COUNT(rd->control);
		if( count != pkt_len) {
			lp->stats.rx_errors++;
		} else if (count < 64) {
			/* Shorter than a minimum Ethernet frame: runt. */
			lp->stats.rx_errors++;
		} else if ((devcs & (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) !=
			   (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) {
			/* cgg - check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Acacia
			   (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_over_errors++;
		} else if (devcs & ETHRX_rok_m) {
			/* must be the (first and) last descriptor then */

			/* Malloc up new buffer. */
			skb = dev_alloc_skb(pkt_len+2);
			if (skb == NULL) {
				err("no memory, dropping rx packet.\n");
				lp->stats.rx_dropped++;
			} else {
				/* else added by cgg - used to fall through! */
				/* invalidate the cache before copying
				   the buffer */
				dma_cache_inv((unsigned long)pkt_buf, pkt_len);

				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 bit align */
				skb_put(skb, pkt_len);	/* Make room */
				eth_copy_and_sum(skb, pkt_buf, pkt_len, 0);
				skb->protocol = eth_type_trans(skb, dev);
				/* pass the packet to upper layers */
				netif_rx(skb);
				dev->last_rx = jiffies;
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pkt_len;

				if (IS_RCV_MP(devcs))
					lp->stats.multicast++;
			}
		} else {
			/* This should only happen if we enable
			   accepting broken packets */
			lp->stats.rx_errors++;

			/* cgg - (re-)added statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				dbg(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} else {
				if (IS_RCV_LOR_ERR(devcs)) {
					dbg(2, "RX LOR error\n");
					lp->stats.rx_length_errors++;
				}

				if (IS_RCV_LE_ERR(devcs)) {
					dbg(2, "RX LE error\n");
					lp->stats.rx_length_errors++;
				}
			}

			if (IS_RCV_OVR_ERR(devcs)) {
				/*
				 * The overflow errors are handled through
				 * an interrupt handler.
				 */
				lp->stats.rx_over_errors++;
			}

			/* code violation */
			if (IS_RCV_CV_ERR(devcs)) {
				dbg(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}

			if (IS_RCV_CES_ERR(devcs)) {
				dbg(2, "RX Preamble error\n");
			}
		}

		/* reset descriptor's curr_addr */
		rd->ca = virt_to_phys(pkt_buf);

		/*
		 * cgg - clear the bits that let us see whether this
		 * descriptor has been used or not & reset reception
		 * length.
		 */
		rd->control = DMAD_iod_m | DMA_COUNT(ACACIA_RBSIZE);
		rd->devcs = 0;

		/* Advance to the next ring slot (power-of-two ring). */
		lp->rx_next_out = (lp->rx_next_out + 1) & ACACIA_RDS_MASK;
		rd = &lp->rd_ring[lp->rx_next_out];

		/*
		 * we'll deal with all possible interrupts up to the last
		 * used descriptor - so cancel any interrupts that may have
		 * arrisen while we've been processing.
		 */
		writel(0, &lp->rx_dma_regs->dmas);
	}

	/*
	 * If any worth-while packets have been received, dev_rint()
	 * has done a mark_bh(NET_BH) for us and will work on them
	 * when we get to the bottom-half routine.
	 */
}
/*
 * emac_rx - drain received frames from the EMAC RX SRAM FIFO.
 *
 * Frames are pulled out of the device's on-chip SRAM through the
 * EMAC_RX_IO_DATA_REG window: first an undocumented magic word, then a
 * header word carrying status and length, then the payload.  A previously
 * deferred skb (db->skb_last) is indicated before new frames are read.
 */
static void emac_rx(struct net_device *dev)
{
	struct emac_board_info *db = netdev_priv(dev);
	struct sk_buff *skb;
	u8 *rdptr;
	bool good_packet;
	static int rxlen_last;	/* length of the deferred skb_last frame */
	unsigned int reg_val;
	u32 rxhdr, rxstatus, rxcount, rxlen;

	/* Check packet ready or not */
	while (1) {
		/* race warning: the first packet might arrive with
		 * the interrupts disabled, but the second will fix
		 * it */
		rxcount = readl(db->membase + EMAC_RX_FBC_REG);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RXCount: %x\n", rxcount);

		/* Indicate the frame deferred from the previous pass, then
		 * stop RX DMA until it has been consumed. */
		if ((db->skb_last != NULL) && (rxlen_last > 0)) {
			dev->stats.rx_bytes += rxlen_last;

			/* Pass to upper layer */
			db->skb_last->protocol = eth_type_trans(db->skb_last,
								dev);
			netif_rx(db->skb_last);
			dev->stats.rx_packets++;
			db->skb_last = NULL;
			rxlen_last = 0;

			reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			reg_val &= ~EMAC_RX_CTL_DMA_EN;
			writel(reg_val, db->membase + EMAC_RX_CTL_REG);
		}

		if (!rxcount) {
			db->emacrx_completed_flag = 1;
			reg_val = readl(db->membase + EMAC_INT_CTL_REG);
			reg_val |= (0xf << 0) | (0x01 << 8);
			writel(reg_val, db->membase + EMAC_INT_CTL_REG);

			/* had one stuck? */
			rxcount = readl(db->membase + EMAC_RX_FBC_REG);
			if (!rxcount)
				return;
		}

		reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG);
		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "receive header: %x\n", reg_val);
		if (reg_val != EMAC_UNDOCUMENTED_MAGIC) {
			/* The FIFO is out of sync with us: disable RX, flush
			 * the RX FIFO, re-enable RX and re-arm interrupts. */
			/* disable RX */
			reg_val = readl(db->membase + EMAC_CTL_REG);
			writel(reg_val & ~EMAC_CTL_RX_EN,
			       db->membase + EMAC_CTL_REG);

			/* Flush RX FIFO */
			reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			writel(reg_val | (1 << 3),
			       db->membase + EMAC_RX_CTL_REG);

			do {
				reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			} while (reg_val & (1 << 3));

			/* enable RX */
			reg_val = readl(db->membase + EMAC_CTL_REG);
			writel(reg_val | EMAC_CTL_RX_EN,
			       db->membase + EMAC_CTL_REG);
			reg_val = readl(db->membase + EMAC_INT_CTL_REG);
			reg_val |= (0xf << 0) | (0x01 << 8);
			writel(reg_val, db->membase + EMAC_INT_CTL_REG);

			db->emacrx_completed_flag = 1;

			return;
		}

		/* A packet ready now  & Get status/length */
		good_packet = true;

		emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
				 &rxhdr, sizeof(rxhdr));

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr)));

		rxlen = EMAC_RX_IO_DATA_LEN(rxhdr);
		rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxstatus, rxlen);

		/* Packet Status check */
		if (rxlen < 0x40) {
			good_packet = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) {
			good_packet = false;

			if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}

			if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from EMAC */
		if (good_packet) {
			skb = netdev_alloc_skb(dev, rxlen + 4);
			if (!skb)
				/* NOTE(review): on allocation failure the
				 * frame's payload is left in the RX SRAM and
				 * the next iteration re-reads the header
				 * word -- verify the FIFO cannot desync
				 * here. */
				continue;
			skb_reserve(skb, 2);
			/* rxlen includes the 4-byte FCS, which is not
			 * handed up. */
			rdptr = (u8 *) skb_put(skb, rxlen - 4);

			/* Read received packet from RX SRAM */
			if (netif_msg_rx_status(db))
				dev_dbg(db->dev, "RxLen %x\n", rxlen);

			emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
					 rdptr, rxlen);
			dev->stats.rx_bytes += rxlen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
		}
	}
}
/*----------------------------------------------------------------
* p80211pb_80211_to_ether
*
* Uses the contents of a received 802.11 frame and the etherconv
* setting to build an ether frame.
*
* This function extracts the src and dest address from the 802.11
* frame to use in the construction of the eth frame.
*
* Arguments:
*	ethconv		Conversion type to perform
*	skb		Packet buffer containing the 802.11 frame
*
* Returns:
*	0 on success, non-zero otherwise
*
* Call context:
*	May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
int skb_p80211_to_ether(wlandevice_t * wlandev, u32 ethconv,
			struct sk_buff *skb)
{
	netdevice_t *netdev = wlandev->netdev;
	u16 fc;
	unsigned int payload_length;
	unsigned int payload_offset;
	u8 daddr[WLAN_ETHADDR_LEN];
	u8 saddr[WLAN_ETHADDR_LEN];
	p80211_hdr_t *w_hdr;
	wlan_ethhdr_t *e_hdr;
	wlan_llc_t *e_llc;
	wlan_snap_t *e_snap;

	int foo;

	/* NOTE(review): payload_length is unsigned and computed before any
	 * check that skb->len >= WLAN_HDR_A3_LEN + WLAN_CRC_LEN -- a
	 * too-short frame would wrap to a huge value; confirm callers
	 * guarantee the minimum length. */
	payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
	payload_offset = WLAN_HDR_A3_LEN;

	w_hdr = (p80211_hdr_t *) skb->data;

	/* setup some vars for convenience */
	fc = le16_to_cpu(w_hdr->a3.fc);
	/* Select DA/SA according to the ToDS/FromDS bits; the 4-address
	 * (WDS) case also grows the header and shrinks the payload. */
	if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
		memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
	} else if ((WLAN_GET_FC_TODS(fc) == 0)
		   && (WLAN_GET_FC_FROMDS(fc) == 1)) {
		memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
	} else if ((WLAN_GET_FC_TODS(fc) == 1)
		   && (WLAN_GET_FC_FROMDS(fc) == 0)) {
		memcpy(daddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
	} else {
		payload_offset = WLAN_HDR_A4_LEN;
		if (payload_length < WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN) {
			printk(KERN_ERR "A4 frame too short!\n");
			return 1;
		}
		payload_length -= (WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN);
		memcpy(daddr, w_hdr->a4.a3, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a4.a4, WLAN_ETHADDR_LEN);
	}

	/* perform de-wep if necessary.. */
	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) && WLAN_GET_FC_ISWEP(fc)
	    && (wlandev->hostwep & HOSTWEP_DECRYPT)) {
		if (payload_length <= 8) {
			printk(KERN_ERR "WEP frame too short (%u).\n",
			       skb->len);
			return 1;
		}
		/* Decrypt in place: 4-byte IV precedes the data, 4-byte ICV
		 * trails it. */
		if ((foo = wep_decrypt(wlandev, skb->data + payload_offset + 4,
				       payload_length - 8, -1,
				       skb->data + payload_offset,
				       skb->data + payload_offset +
				       payload_length - 4))) {
			/* de-wep failed, drop skb. */
			pr_debug("Host de-WEP failed, dropping frame (%d).\n",
				 foo);
			wlandev->rx.decrypt_err++;
			return 2;
		}

		/* subtract the IV+ICV length off the payload */
		payload_length -= 8;
		/* chop off the IV */
		skb_pull(skb, 4);
		/* chop off the ICV. */
		skb_trim(skb, skb->len - 4);

		wlandev->rx.decrypt++;
	}

	e_hdr = (wlan_ethhdr_t *) (skb->data + payload_offset);

	e_llc = (wlan_llc_t *) (skb->data + payload_offset);
	e_snap =
	    (wlan_snap_t *) (skb->data + payload_offset + sizeof(wlan_llc_t));

	/* Test for the various encodings */
	if ((payload_length >= sizeof(wlan_ethhdr_t)) &&
	    (e_llc->dsap != 0xaa || e_llc->ssap != 0xaa) &&
	    ((memcmp(daddr, e_hdr->daddr, WLAN_ETHADDR_LEN) == 0) ||
	     (memcmp(saddr, e_hdr->saddr, WLAN_ETHADDR_LEN) == 0))) {
		pr_debug("802.3 ENCAP len: %d\n", payload_length);
		/* 802.3 Encapsulated */
		/* Test for an overlength frame */
		if (payload_length > (netdev->mtu + WLAN_ETHHDR_LEN)) {
			/* A bogus length ethfrm has been encap'd. */
			/* Is someone trying an oflow attack? */
			printk(KERN_ERR "ENCAP frame too large (%d > %d)\n",
			       payload_length, netdev->mtu + WLAN_ETHHDR_LEN);
			return 1;
		}

		/* Chop off the 802.11 header.  it's already sane. */
		skb_pull(skb, payload_offset);
		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);

	} else if ((payload_length >= sizeof(wlan_llc_t) +
		    sizeof(wlan_snap_t)) &&
		   (e_llc->dsap == 0xaa) &&
		   (e_llc->ssap == 0xaa) &&
		   (e_llc->ctl == 0x03) &&
		   (((memcmp(e_snap->oui, oui_rfc1042,
			     WLAN_IEEE_OUI_LEN) == 0) &&
		     (ethconv == WLAN_ETHCONV_8021h) &&
		     (p80211_stt_findproto(le16_to_cpu(e_snap->type)))) ||
		    (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
		     0))) {
		pr_debug("SNAP+RFC1042 len: %d\n", payload_length);
		/* it's a SNAP + RFC1042 frame && protocol is in STT */
		/* build 802.3 + RFC1042 */

		/* Test for an overlength frame */
		if (payload_length > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			printk(KERN_ERR "SNAP frame too large (%d > %d)\n",
			       payload_length, netdev->mtu);
			return 1;
		}

		/* chop 802.11 header from skb. */
		skb_pull(skb, payload_offset);

		/* create 802.3 header at beginning of skb. */
		e_hdr = (wlan_ethhdr_t *) skb_push(skb, WLAN_ETHHDR_LEN);
		memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
		memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
		e_hdr->type = htons(payload_length);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);

	} else if ((payload_length >= sizeof(wlan_llc_t) +
		    sizeof(wlan_snap_t)) &&
		   (e_llc->dsap == 0xaa) &&
		   (e_llc->ssap == 0xaa) &&
		   (e_llc->ctl == 0x03)) {
		pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
		/* it's an 802.1h frame || (an RFC1042 && protocol is not
		 * in STT) */
		/* build a DIXII + RFC894 */

		/* Test for an overlength frame */
		if ((payload_length - sizeof(wlan_llc_t) -
		     sizeof(wlan_snap_t)) > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			printk(KERN_ERR "DIXII frame too large (%ld > %d)\n",
			       (long int)(payload_length -
					  sizeof(wlan_llc_t) -
					  sizeof(wlan_snap_t)), netdev->mtu);
			return 1;
		}

		/* chop 802.11 header from skb. */
		skb_pull(skb, payload_offset);

		/* chop llc header from skb. */
		skb_pull(skb, sizeof(wlan_llc_t));

		/* chop snap header from skb. */
		skb_pull(skb, sizeof(wlan_snap_t));

		/* create 802.3 header at beginning of skb. */
		/* NOTE: e_snap is read after the pulls above -- it still
		 * points into the (unmoved) linear buffer, which is why
		 * this works. */
		e_hdr = (wlan_ethhdr_t *) skb_push(skb, WLAN_ETHHDR_LEN);
		e_hdr->type = e_snap->type;
		memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
		memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);
	} else {
		pr_debug("NON-ENCAP len: %d\n", payload_length);
		/* any NON-ENCAP */
		/* it's a generic 80211+LLC or IPX 'Raw 802.3' */
		/*  build an 802.3 frame */
		/* allocate space and setup hostbuf */

		/* Test for an overlength frame */
		if (payload_length > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			printk(KERN_ERR "OTHER frame too large (%d > %d)\n",
			       payload_length, netdev->mtu);
			return 1;
		}

		/* Chop off the 802.11 header. */
		skb_pull(skb, payload_offset);

		/* create 802.3 header at beginning of skb. */
		e_hdr = (wlan_ethhdr_t *) skb_push(skb, WLAN_ETHHDR_LEN);
		memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
		memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
		e_hdr->type = htons(payload_length);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);

	}

	/*
	 * Note that eth_type_trans() expects an skb w/ skb->data pointing
	 * at the MAC header, it then sets the following skb members:
	 * skb->mac_header,
	 * skb->data, and
	 * skb->pkt_type.
	 * It then _returns_ the value that _we're_ supposed to stuff in
	 * skb->protocol.  This is nuts.
	 */
	skb->protocol = eth_type_trans(skb, netdev);

	/* jkriegl: process signal and noise as set in hfa384x_int_rx() */
	/* jkriegl: only process signal/noise if requested by iwspy */
	if (wlandev->spy_number)
		orinoco_spy_gather(wlandev, eth_hdr(skb)->h_source,
				   P80211SKB_RXMETA(skb));

	/* Free the metadata */
	p80211skb_rxmeta_detach(skb);

	return 0;
}
/*
 * ni52_rcv_int - receive-interrupt handler for the i82586.
 *
 * Walks the receive-frame-descriptor (RFD) list in shared memory while the
 * top RFD is marked complete, copies good single-buffer frames into fresh
 * skbs, and recycles each RFD by re-suspending it at the tail of the list.
 */
static void ni52_rcv_int(struct net_device *dev)
{
	int status,cnt=0;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct *rbd;
	struct priv *p = (struct priv *) dev->priv;

	if(debuglevel > 0)
		printk("R");

	/* Process every completed RFD at the head of the list. */
	for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
	{
		rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);

		if(status & RFD_OK) /* frame received without error? */
		{
			if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */
			{
				totlen &= RBD_MASK; /* length of this frame */
				rbd->status = 0;
				skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
				if(skb != NULL)
				{
					skb_reserve(skb,2);	/* align IP header */
					skb_put(skb,totlen);
					/* Copy out of the 82586's shared buffer. */
					eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
					skb->protocol=eth_type_trans(skb,dev);
					netif_rx(skb);
					p->stats.rx_packets++;
					p->stats.rx_bytes += totlen;
				}
				else
					p->stats.rx_dropped++;
			}
			else
			{
				int rstat;
				/* free all RBD's until RBD_LAST is set */
				totlen = 0;
				while(!((rstat=rbd->status) & RBD_LAST))
				{
					totlen += rstat & RBD_MASK;
					if(!rstat)
					{
						printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
						break;
					}
					rbd->status = 0;
					rbd = (struct rbd_struct *) make32(rbd->next);
				}
				totlen += rstat & RBD_MASK;
				rbd->status = 0;
				printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
				p->stats.rx_dropped++;
			}
		}
		else /* frame !(ok), only with 'save-bad-frames' */
		{
			printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
			p->stats.rx_errors++;
		}

		/* Recycle the RFD: clear status, make it the new suspend
		 * point, release the previous suspend point, and advance. */
		p->rfd_top->stat_high = 0;
		p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
		p->rfd_top->rbd_offset = 0xffff;
		p->rfd_last->last = 0;        /* delete RFD_SUSP */
		p->rfd_last = p->rfd_top;
		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
		p->scb->rfa_offset = make16(p->rfd_top);

		if(debuglevel > 0)
			printk("%d",cnt++);
	}

	if(automatic_resume)
	{
		/* Kick the receive unit in case it suspended. */
		WAIT_4_SCB_CMD();
		p->scb->cmd_ruc = RUC_RESUME;
		ni_attn586();
		WAIT_4_SCB_CMD_RUC();
	}

#ifdef WAIT_4_BUSY
	{
		int i;
		for(i=0;i<1024;i++)
		{
			if(p->rfd_top->status)
				break;
			DELAY_16();
			if(i == 1023)
				printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
		}
	}
#endif

#if 0
	if(!at_least_one)
	{
		int i;
		volatile struct rfd_struct *rfds=p->rfd_top;
		volatile struct rbd_struct *rbds;
		printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
		for(i=0;i< (p->num_recv_buffs+4);i++)
		{
			rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
			printk("%04x:%04x ",rfds->status,rbds->status);
			rfds = (struct rfd_struct *) make32(rfds->next);
		}
		printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
		printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
	}
	old_at_least = at_least_one;
#endif

	if(debuglevel > 0)
		printk("r");
}
int rtw_recv_indicatepkt(struct adapter *padapter, struct recv_frame *precv_frame) { struct recv_priv *precvpriv; struct __queue *pfree_recv_queue; struct sk_buff *skb; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; precvpriv = &(padapter->recvpriv); pfree_recv_queue = &(precvpriv->free_recv_queue); skb = precv_frame->pkt; if (skb == NULL) { RT_TRACE(_module_recv_osdep_c_, _drv_err_, ("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n")); goto _recv_indicatepkt_drop; } RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("rtw_recv_indicatepkt():skb != NULL !!!\n")); RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("rtw_recv_indicatepkt():precv_frame->rx_head =%p precv_frame->hdr.rx_data =%p\n", precv_frame->rx_head, precv_frame->rx_data)); RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("precv_frame->hdr.rx_tail =%p precv_frame->rx_end =%p precv_frame->hdr.len =%d\n", precv_frame->rx_tail, precv_frame->rx_end, precv_frame->len)); skb->data = precv_frame->rx_data; skb_set_tail_pointer(skb, precv_frame->len); skb->len = precv_frame->len; RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n", skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len)); if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { struct sk_buff *pskb2 = NULL; struct sta_info *psta = NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct rx_pkt_attrib *pattrib = &precv_frame->attrib; int bmcast = IS_MCAST(pattrib->dst); if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) { if (bmcast) { psta = rtw_get_bcmc_stainfo(padapter); pskb2 = skb_clone(skb, GFP_ATOMIC); } else { psta = rtw_get_stainfo(pstapriv, pattrib->dst); } if (psta) { struct net_device *pnetdev; pnetdev = (struct net_device *)padapter->pnetdev; skb->dev = pnetdev; skb_set_queue_mapping(skb, rtw_recv_select_queue(skb)); rtw_xmit_entry(skb, pnetdev); if (bmcast) skb = pskb2; else goto _recv_indicatepkt_end; } } } rcu_read_lock(); 
rcu_dereference(padapter->pnetdev->rx_handler_data); rcu_read_unlock(); skb->ip_summed = CHECKSUM_NONE; skb->dev = padapter->pnetdev; skb->protocol = eth_type_trans(skb, padapter->pnetdev); netif_rx(skb); _recv_indicatepkt_end: /* pointers to NULL before rtw_free_recvframe() */ precv_frame->pkt = NULL; rtw_free_recvframe(precv_frame, pfree_recv_queue); RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("\n rtw_recv_indicatepkt :after netif_rx!!!!\n")); return _SUCCESS; _recv_indicatepkt_drop: /* enqueue back to free_recv_queue */ rtw_free_recvframe(precv_frame, pfree_recv_queue); return _FAIL; }
/* * Au1000 receive routine. */ static int au1000_rx(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); struct sk_buff *skb; volatile rx_dma_t *prxd; u32 buff_stat, status; db_dest_t *pDB; u32 frmlen; if (au1000_debug > 5) printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; while (buff_stat & RX_T_DONE) { status = prxd->status; pDB = aup->rx_db_inuse[aup->rx_head]; update_rx_stats(dev, status); if (!(status & RX_ERROR)) { /* good frame */ frmlen = (status & RX_FRAME_LEN_MASK); frmlen -= 4; /* Remove FCS */ skb = dev_alloc_skb(frmlen + 2); if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; continue; } skb_reserve(skb, 2); /* 16 byte IP header align */ skb_copy_to_linear_data(skb, (unsigned char *)pDB->vaddr, frmlen); skb_put(skb, frmlen); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* pass the packet to upper layers */ } else { if (au1000_debug > 4) { if (status & RX_MISSED_FRAME) printk("rx miss\n"); if (status & RX_WDOG_TIMER) printk("rx wdog\n"); if (status & RX_RUNT) printk("rx runt\n"); if (status & RX_OVERLEN) printk("rx overlen\n"); if (status & RX_COLL) printk("rx coll\n"); if (status & RX_MII_ERROR) printk("rx mii error\n"); if (status & RX_CRC_ERROR) printk("rx crc error\n"); if (status & RX_LEN_ERROR) printk("rx len error\n"); if (status & RX_U_CNTRL_FRAME) printk("rx u control frame\n"); } } prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); au_sync(); /* next descriptor */ prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; } return 0; }
/*
 * ssh_virtual_adapter_send - inject a packet into the stack through a
 * virtual adapter, as if the adapter had received it.
 *
 * Ethernet packets are passed through unchanged; IPv4/IPv6 packets first
 * get an ethernet header prepended (destination = the adapter's MAC,
 * source derived from the packet's source IP).  On success the skb is
 * detached from the SshInterceptorPacket, indicated with netif_rx(), and
 * the packet header is freed.
 *
 * Fix versus the previous revision: the skb_push()-failure path unlocked
 * the interceptor mutex but returned WITHOUT calling local_bh_enable(),
 * permanently leaving bottom halves disabled on this CPU.  Every error
 * path now re-enables bottom halves, matching the local_bh_disable() at
 * entry.
 */
void ssh_virtual_adapter_send(SshInterceptor interceptor,
			      SshInterceptorPacket pp)
{
  SshVirtualAdapter adapter;
  SshInterceptorInternalPacket ipp = (SshInterceptorInternalPacket) pp;
  struct net_device_stats *stats;
  struct sk_buff *skb;

  local_bh_disable();
  ssh_kernel_mutex_lock(interceptor->interceptor_lock);

  adapter = ssh_virtual_adapter_ifnum_to_adapter(interceptor, pp->ifnum_out);
  if (adapter == NULL)
    {
      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
      local_bh_enable();
      SSH_DEBUG(SSH_D_ERROR, ("Virtual adapter %d does not exist",
			      (int)pp->ifnum_out));
      goto error;
    }

  /* Check the type of the source packet. */
  if (pp->protocol == SSH_PROTOCOL_ETHERNET)
    {
      /* We can send this directly. */
    }
  else if (pp->protocol == SSH_PROTOCOL_IP4
#ifdef SSH_LINUX_INTERCEPTOR_IPV6
	   || pp->protocol == SSH_PROTOCOL_IP6
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */
	   )
    {
      unsigned char ether_hdr[SSH_ETHERH_HDRLEN];
      SshIpAddrStruct src;
      SshUInt16 ethertype = SSH_ETHERTYPE_IP;
      unsigned char *cp = NULL;
      size_t packet_len;

      /* Add ethernet framing. */

      /* Destination is virtual adapter's ethernet address. */
      memcpy(ether_hdr + SSH_ETHERH_OFS_DST, adapter->dev->dev_addr,
	     SSH_ETHERH_ADDRLEN);

      /* Resolve packet's source and the ethernet type to use. */
      packet_len = ssh_interceptor_packet_len(pp);

      /* IPv4 */
      if (pp->protocol == SSH_PROTOCOL_IP4)
	{
	  if (packet_len < SSH_IPH4_HDRLEN)
	    {
	      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
	      local_bh_enable();
	      SSH_DEBUG(SSH_D_ERROR,
			("Packet is too short to contain IPv4 header"));
	      goto error;
	    }
	  /* Pullup requests data from the header of a writable skb;
	     if the skb is shared or too fragmented we cannot touch it. */
	  if (likely(skb_headlen(ipp->skb) >= SSH_IPH4_HDRLEN
		     && !skb_shared(ipp->skb)
		     && SSH_SKB_WRITABLE(ipp->skb, SSH_IPH4_HDRLEN)))
	    cp = ipp->skb->data;
	  if (cp == NULL)
	    {
	      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
	      local_bh_enable();
	      goto error_already_freed;
	    }
	  SSH_IPH4_SRC(&src, cp);
	}
#ifdef SSH_LINUX_INTERCEPTOR_IPV6
      /* IPv6 */
      else
	{
	  if (packet_len < SSH_IPH6_HDRLEN)
	    {
	      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
	      local_bh_enable();
	      SSH_DEBUG(SSH_D_ERROR,
			("Packet too short to contain IPv6 header"));
	      goto error;
	    }
	  if (likely(skb_headlen(ipp->skb) >= SSH_IPH6_HDRLEN
		     && !skb_shared(ipp->skb)
		     && SSH_SKB_WRITABLE(ipp->skb, SSH_IPH6_HDRLEN)))
	    cp = ipp->skb->data;
	  if (cp == NULL)
	    {
	      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
	      local_bh_enable();
	      goto error_already_freed;
	    }
	  SSH_IPH6_SRC(&src, cp);
	  ethertype = SSH_ETHERTYPE_IPv6;
	}
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */

      /* Finalize ethernet header. */
      ssh_virtual_adapter_ip_ether_address(&src,
					   ether_hdr + SSH_ETHERH_OFS_SRC);
      SSH_PUT_16BIT(ether_hdr + SSH_ETHERH_OFS_TYPE, ethertype);

      /* Insert header to the packet. */
      cp = NULL;
      if (likely((skb_headroom(ipp->skb) >=
		  (SSH_ETHERH_HDRLEN + SSH_INTERCEPTOR_PACKET_HARD_HEAD_ROOM))
		 && !skb_shared(ipp->skb)
		 && SSH_SKB_WRITABLE(ipp->skb, 0)))
	cp = skb_push(ipp->skb, SSH_ETHERH_HDRLEN);
      if (cp == NULL)
	{
	  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
	  /* BUGFIX: bottom halves were left disabled on this path. */
	  local_bh_enable();
	  goto error_already_freed;
	}
      memcpy(cp, ether_hdr, SSH_ETHERH_HDRLEN);

      /* Just to be pedantic. */
      pp->protocol = SSH_PROTOCOL_ETHERNET;
    }
  else
    {
      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
      local_bh_enable();
      SSH_DEBUG(SSH_D_ERROR, ("Can not handle protocol %d", pp->protocol));
      goto error;
    }

  /* Tear off the internal packet from the generic
     SshInterceptorPacket. */
  skb = ipp->skb;
  ipp->skb = NULL;

  /* (re-)receive the packet via the interface; this should make the
     packet go back up the stack */
  skb->protocol = eth_type_trans(skb, adapter->dev);
  skb->dev = adapter->dev;

  /* Update per virtual adapter statistics. */
  stats = &adapter->low_level_stats;
  stats->rx_packets++;
  stats->rx_bytes += skb->len;

  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
  local_bh_enable();

  /* Send the skb up towards stack.  If it is IP (or ARP), it will be
     intercepted by ssh_interceptor_packet_in. */
  netif_rx(skb);

  /* Put the packet header on freelist. */
  ssh_interceptor_packet_free((SshInterceptorPacket) ipp);
  return;

 error:
  ssh_interceptor_packet_free(pp);

 error_already_freed:
  return;
}
/*
 * ar6000_hci_pkt_recv - completion callback for a received HCI packet.
 *
 * Trims the HTC headroom off the underlying skb and either indicates it to
 * the BT stack (normal mode) or, for testing, to the network stack.  If
 * nobody accepts the skb it is freed here; the HTC_PACKET wrapper is
 * always recycled.
 */
static void ar6000_hci_pkt_recv(void *pContext, HTC_PACKET *pPacket)
{
    AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
    struct sk_buff       *skb;
    AR_SOFTC_DEV_T *arDev = pHcidevInfo->ar->arDev[0];

    A_ASSERT(pHcidevInfo != NULL);
    skb = (struct sk_buff *)pPacket->pPktContext;
    A_ASSERT(skb != NULL);

    do {

        if (A_FAILED(pPacket->Status)) {
            break;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_HCI_RECV,
                        ("HCI Bridge, packet received type : %d len:%d \n",
                        HCI_GET_PACKET_TYPE(pPacket),pPacket->ActualLength));

            /* set the actual buffer position in the os buffer, HTC recv buffers posted to HCI are set
             * to fill the front of the buffer */
        A_NETBUF_PUT(skb,pPacket->ActualLength + pHcidevInfo->HCIProps.HeadRoom);
        A_NETBUF_PULL(skb,pHcidevInfo->HCIProps.HeadRoom);

        if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_DUMP)) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("<<< Recv HCI %s packet len:%d \n",
                        (HCI_GET_PACKET_TYPE(pPacket) == HCI_EVENT_TYPE) ? "EVENT" : "ACL",
                        skb->len));
            AR_DEBUG_PRINTBUF(skb->data, skb->len,"BT HCI RECV Packet Dump");
        }

        if (pHcidevInfo->HciNormalMode) {
                /* indicate the packet */
            if (bt_indicate_recv(pHcidevInfo,HCI_GET_PACKET_TYPE(pPacket),skb)) {
                    /* bt stack accepted the packet */
                skb = NULL;  /* ownership transferred; do not free below */
            }
            break;
        }

            /* for testing, indicate packet to the network stack */
#ifdef EXPORT_HCI_BRIDGE_INTERFACE
        skb->dev = (struct net_device *)(pHcidevInfo->HCITransHdl.netDevice);

        if ((((struct net_device *)pHcidevInfo->HCITransHdl.netDevice)->flags & IFF_UP) == IFF_UP) {
            skb->protocol = eth_type_trans(skb, (struct net_device *)(pHcidevInfo->HCITransHdl.netDevice));
#else
        skb->dev = arDev->arNetDev;

        if ((arDev->arNetDev->flags & IFF_UP) == IFF_UP) {
            skb->protocol = eth_type_trans(skb, arDev->arNetDev);
#endif
            netif_rx(skb);
            skb = NULL;  /* consumed by netif_rx() */
        }

    } while (FALSE);

    /* Recycle the HTC_PACKET wrapper regardless of the outcome. */
    FreeHTCStruct(pHcidevInfo,pPacket);

    if (skb != NULL) {
            /* packet was not accepted, free it */
        FreeBtOsBuf(pHcidevInfo,skb);
    }

}

/*
 * ar6000_hci_pkt_refill - top the receive pool back up to its per-type
 * maximum (ACL vs. event buffers) when HTC reports buffers running low.
 */
static void ar6000_hci_pkt_refill(void *pContext, HCI_TRANSPORT_PACKET_TYPE Type, int BuffersAvailable)
{
    AR6K_HCI_BRIDGE_INFO *pHcidevInfo = (AR6K_HCI_BRIDGE_INFO *)pContext;
    int                  refillCount;

    if (Type == HCI_ACL_TYPE) {
        refillCount =  MAX_ACL_RECV_BUFS - BuffersAvailable;
    } else {
        refillCount =  MAX_EVT_RECV_BUFS - BuffersAvailable;
    }

    if (refillCount > 0) {
        RefillRecvBuffers(pHcidevInfo,Type,refillCount);
    }

}
/*
 * NAPI poll handler for the ibmveth virtual Ethernet adapter.
 *
 * Pulls up to @budget frames from the hypervisor receive queue, copies
 * small frames (< rx_copybreak) into freshly allocated skbs so the large
 * buffer can be recycled, and hands everything to the stack with
 * napi_gro_receive().  Returns the number of frames processed; returning
 * less than @budget signals NAPI that the device is idle.
 */
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		/* pair with the descriptor write ordering on the producer side */
		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			/* copybreak: copy small frames so the original large
			 * rx buffer can be recycled back to the queue */
			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				/* recycle failed -> buffer was unmapped, free it */
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				/* large frame: take the buffer out of the queue
				 * and deliver it directly */
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded and if the packet
					 *  is large send, the checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
						adapter->rx_large_packets++;
					}
				}
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		/* a frame raced in after napi_complete(): disable the
		 * interrupt again and go back to polling */
		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context, *skb2; struct eth_dev *dev = ep->driver_data; int status = req->status; switch (status) { /* normal completion */ case 0: skb_put(skb, req->actual); if (dev->unwrap) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { status = dev->unwrap(dev->port_usb, skb, &dev->rx_frames); } else { dev_kfree_skb_any(skb); status = -ENOTCONN; } spin_unlock_irqrestore(&dev->lock, flags); } else { skb_queue_tail(&dev->rx_frames, skb); } skb = NULL; skb2 = skb_dequeue(&dev->rx_frames); while (skb2) { if (status < 0 || ETH_HLEN > skb2->len || skb2->len > VLAN_ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb2->len); dev_kfree_skb_any(skb2); goto next_frame; } skb2->protocol = eth_type_trans(skb2, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb2->len; /* no buffer copies needed, unless hardware can't * use skb buffers. */ status = netif_rx(skb2); next_frame: skb2 = skb_dequeue(&dev->rx_frames); } break; /* software-driven interface shutdown */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; /* for hardware automagic (such as pxa) */ case -ECONNABORTED: /* endpoint reset */ DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: dev_kfree_skb_any(skb); goto clean; /* data overrun */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; /* FALLTHROUGH */ default: dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } if (skb) dev_kfree_skb_any(skb); if (!netif_running(dev->net)) { clean: spin_lock(&dev->req_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_lock); req = NULL; } if (req) rx_submit(dev, req, GFP_ATOMIC); }
/* We have a good packet(s), get it/them out of the buffers. */
/*
 * FMV-18x programmed-I/O receive.  Each inw(DATAPORT) consumes data from
 * the card's RX FIFO, so the read sequence (status word, length word, then
 * payload via insw) is exact and must not be reordered.  RX_MODE bit 0x40
 * reads as 0 while packets are pending.  outb(0x05, ioaddr + 14) appears
 * to skip/acknowledge the current packet — NOTE(review): register 14's
 * semantics are not visible here, confirm against the board docs.
 */
static void net_rx(struct device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
	int ioaddr = dev->base_addr;
	int boguscount = 10;	/* 5 -> 10: by agy 19940922 */

	while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
		/* Clear PKT_RDY bit: by agy 19940922 */
		/* outb(0x80, ioaddr + RX_STATUS); */
		ushort status = inw(ioaddr + DATAPORT);

		if (net_debug > 4)
			printk("%s: Rxing packet mode %02x status %04x.\n",
				   dev->name, inb(ioaddr + RX_MODE), status);
#ifndef final_version
		if (status == 0) {
			outb(0x05, ioaddr + 14);
			break;
		}
#endif

		if ((status & 0xF0) != 0x20) {	/* There was an error. */
			lp->stats.rx_errors++;
			if (status & 0x08) lp->stats.rx_length_errors++;
			if (status & 0x04) lp->stats.rx_frame_errors++;
			if (status & 0x02) lp->stats.rx_crc_errors++;
			if (status & 0x01) lp->stats.rx_over_errors++;
		} else {
			/* next FIFO word is the frame length */
			ushort pkt_len = inw(ioaddr + DATAPORT);
			/* Malloc up new buffer. */
			struct sk_buff *skb;

			if (pkt_len > 1550) {
				printk("%s: The FMV-18x claimed a very large packet, size %d.\n",
					   dev->name, pkt_len);
				outb(0x05, ioaddr + 14);
				lp->stats.rx_errors++;
				break;
			}
			skb = dev_alloc_skb(pkt_len+3);
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet (len %d).\n",
					   dev->name, pkt_len);
				outb(0x05, ioaddr + 14);
				lp->stats.rx_dropped++;
				break;
			}
			skb->dev = dev;
			/* align the IP header on a 16-byte boundary */
			skb_reserve(skb,2);

			/* word-at-a-time copy of the payload out of the FIFO */
			insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);

			if (net_debug > 5) {
				int i;
				printk("%s: Rxed packet of length %d: ", dev->name, pkt_len);
				for (i = 0; i < 14; i++)
					printk(" %02x", skb->data[i]);
				printk(".\n");
			}

			skb->protocol=eth_type_trans(skb, dev);
			netif_rx(skb);
			lp->stats.rx_packets++;
		}
		if (--boguscount <= 0)
			break;
	}

	/* If any worth-while packets have been received, dev_rint()
	   has done a mark_bh(NET_BH) for us and will work on them
	   when we get to the bottom-half routine. */
	{
		/* drain any stragglers left in the FIFO (bounded to 20 tries) */
		int i;
		for (i = 0; i < 20; i++) {
			if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
				break;
			(void)inw(ioaddr + DATAPORT);				/* dummy status read */
			outb(0x05, ioaddr + 14);
		}

		if (net_debug > 5 && i > 0)
			printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
				   dev->name, inb(ioaddr + RX_MODE), i);
	}

	return;
}
static void mc32_rx_ring(struct net_device *dev) { struct mc32_local *lp = netdev_priv(dev); volatile struct skb_header *p; u16 rx_ring_tail; u16 rx_old_tail; int x=0; rx_old_tail = rx_ring_tail = lp->rx_ring_tail; do { p=lp->rx_ring[rx_ring_tail].p; if(!(p->status & (1<<7))) { /* Not COMPLETED */ break; } if(p->status & (1<<6)) /* COMPLETED_OK */ { u16 length=p->length; struct sk_buff *skb; struct sk_buff *newskb; /* Try to save time by avoiding a copy on big frames */ if ((length > RX_COPYBREAK) && ((newskb=dev_alloc_skb(1532)) != NULL)) { skb=lp->rx_ring[rx_ring_tail].skb; skb_put(skb, length); skb_reserve(newskb,18); lp->rx_ring[rx_ring_tail].skb=newskb; p->data=isa_virt_to_bus(newskb->data); } else { skb=dev_alloc_skb(length+2); if(skb==NULL) { lp->net_stats.rx_dropped++; goto dropped; } skb_reserve(skb,2); memcpy(skb_put(skb, length), lp->rx_ring[rx_ring_tail].skb->data, length); } skb->protocol=eth_type_trans(skb,dev); skb->dev=dev; dev->last_rx = jiffies; lp->net_stats.rx_packets++; lp->net_stats.rx_bytes += length; netif_rx(skb); } dropped: p->length = 1532; p->status = 0; rx_ring_tail=next_rx(rx_ring_tail); } while(x++<48); /* If there was actually a frame to be processed, place the EOL bit */ /* at the descriptor prior to the one to be filled next */ if (rx_ring_tail != rx_old_tail) { lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL; lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL; lp->rx_ring_tail=rx_ring_tail; } }
/*
 * SGI Seeq 8003 receive path.
 *
 * Services every descriptor not owned by the HPC DMA engine.  Frames
 * larger than rx_copybreak are delivered zero-copy (a fresh PKT_BUF_SZ skb
 * replaces the one in the ring); smaller ones are copied into a right-sized
 * skb so the ring buffer stays in place.  Frames whose source MAC matches
 * our own are silently dropped.  Each serviced descriptor is remapped for
 * DMA and returned to the ring, and finally the end-of-ring (EOR) marker is
 * moved from its old position to the descriptor before rx_new.  The
 * dma_sync_desc_cpu()/dma_sync_desc_dev() pairing around every descriptor
 * access is required for coherence and must keep its exact ordering.
 */
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		/* BCNT holds the residual byte count; the status byte sits
		 * right after the payload */
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					/* zero-copy: hand the ring skb up,
					 * install a fresh one */
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					/* copybreak: copy into a small skb,
					 * keep the ring skb */
					skb = netdev_alloc_skb(dev, len + 2);
					if (skb) {
						skb_reserve(skb, 2);
						skb_copy_to_linear_data(skb, rd->skb->data, len);
					}
					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
						dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	/* move the end-of-ring marker: clear it where it was, set it on the
	 * descriptor just before the next fill position */
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}
/* * netvsc_recv_callback - Callback when we receive a packet from the * "wire" on the specified device. */ static int netvsc_recv_callback(struct hv_device *device_obj, struct hv_netvsc_packet *packet) { struct vm_device *device_ctx = to_vm_device(device_obj); struct net_device *net = dev_get_drvdata(&device_ctx->device); struct sk_buff *skb; void *data; int i; unsigned long flags; DPRINT_ENTER(NETVSC_DRV); if (!net) { DPRINT_ERR(NETVSC_DRV, "got receive callback but net device " "not initialized yet"); return 0; } /* Allocate a skb - TODO direct I/O to pages? */ skb = netdev_alloc_skb_ip_align(net, packet->TotalDataBufferLength); if (unlikely(!skb)) { ++net->stats.rx_dropped; return 0; } /* for kmap_atomic */ local_irq_save(flags); /* * Copy to skb. This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ for (i = 0; i < packet->PageBufferCount; i++) { data = kmap_atomic(pfn_to_page(packet->PageBuffers[i].Pfn), KM_IRQ1); data = (void *)(unsigned long)data + packet->PageBuffers[i].Offset; memcpy(skb_put(skb, packet->PageBuffers[i].Length), data, packet->PageBuffers[i].Length); kunmap_atomic((void *)((unsigned long)data - packet->PageBuffers[i].Offset), KM_IRQ1); } local_irq_restore(flags); skb->protocol = eth_type_trans(skb, net); skb->ip_summed = CHECKSUM_NONE; net->stats.rx_packets++; net->stats.rx_bytes += skb->len; /* * Pass the skb back up. Network stack will deallocate the skb when it * is done. * TODO - use NAPI? */ netif_rx(skb); DPRINT_DBG(NETVSC_DRV, "# of recvs %lu total size %lu", net->stats.rx_packets, net->stats.rx_bytes); DPRINT_EXIT(NETVSC_DRV); return 0; }
/*
 * Indicate a received 802.11 data frame (already converted to an Ethernet
 * frame in precv_frame->u.hdr.pkt) to the Linux network stack.
 *
 * In AP mode, frames destined for associated stations (or multicast) are
 * looped back through _rtw_xmit_entry() for intra-BSS forwarding; multicast
 * frames are cloned so one copy still goes up the stack.  Returns _SUCCESS
 * when the frame was indicated (recv_frame is always freed back to
 * pfree_recv_queue on every path), _FAIL when it was dropped.
 */
int rtw_recv_indicatepkt(_adapter *padapter, union recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	_queue	*pfree_recv_queue;
	_pkt *skb;
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
#endif
#ifdef CONFIG_BR_EXT
	void *br_port = NULL;
#endif

_func_enter_;

	precvpriv = &(padapter->recvpriv);
	pfree_recv_queue = &(precvpriv->free_recv_queue);

#ifdef CONFIG_DRVEXT_MODULE
	if (drvext_rx_handler(padapter, precv_frame->u.hdr.rx_data, precv_frame->u.hdr.len) == _SUCCESS)
	{
		goto _recv_indicatepkt_drop;
	}
#endif

#ifdef CONFIG_WAPI_SUPPORT
	if (rtw_wapi_check_for_drop(padapter,precv_frame))
	{
		WAPI_TRACE(WAPI_ERR, "%s(): Rx Reorder Drop case!!\n", __FUNCTION__);
		goto _recv_indicatepkt_drop;
	}
#endif

	skb = precv_frame->u.hdr.pkt;
	if(skb == NULL)
	{
		RT_TRACE(_module_recv_osdep_c_,_drv_err_,("rtw_recv_indicatepkt():skb==NULL something wrong!!!!\n"));
		goto _recv_indicatepkt_drop;
	}

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():skb != NULL !!!\n"));
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head=%p  precv_frame->hdr.rx_data=%p\n", precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("precv_frame->hdr.rx_tail=%p precv_frame->u.hdr.rx_end=%p precv_frame->hdr.len=%d \n", precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end, precv_frame->u.hdr.len));

	/* point the skb at the payload carved out of the recv_frame buffer */
	skb->data = precv_frame->u.hdr.rx_data;

	skb_set_tail_pointer(skb, precv_frame->u.hdr.len);

	skb->len = precv_frame->u.hdr.len;

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n skb->head=%p skb->data=%p skb->tail=%p skb->end=%p skb->len=%d\n", skb->head, skb->data, skb->tail, skb->end, skb->len));

	if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
	{
	 	_pkt *pskb2=NULL;
	 	struct sta_info *psta = NULL;
	 	struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
		int bmcast = IS_MCAST(pattrib->dst);

		//DBG_871X("bmcast=%d\n", bmcast);

		if(_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)==_FALSE)
		{
			//DBG_871X("not ap psta=%p, addr=%pM\n", psta, pattrib->dst);

			if(bmcast)
			{
				psta = rtw_get_bcmc_stainfo(padapter);
				/* NOTE(review): if rtw_skb_clone() fails here,
				 * pskb2 stays NULL; the "skb = pskb2" below then
				 * feeds NULL into the indication path — verify
				 * against upstream fix before relying on this. */
				pskb2 = rtw_skb_clone(skb);
			} else {
				psta = rtw_get_stainfo(pstapriv, pattrib->dst);
			}

			if(psta)
			{
				struct net_device *pnetdev= (struct net_device*)padapter->pnetdev;

				//DBG_871X("directly forwarding to the rtw_xmit_entry\n");

				//skb->ip_summed = CHECKSUM_NONE;
				skb->dev = pnetdev;
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
				skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
#endif //LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35)

				/* intra-BSS forwarding: retransmit to the peer STA */
				_rtw_xmit_entry(skb, pnetdev);

				if(bmcast)
					skb = pskb2;
				else
					goto _recv_indicatepkt_end;
			}
		}
		else// to APself
		{
			//DBG_871X("to APSelf\n");
		}
	}

#ifdef CONFIG_BR_EXT
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
	br_port = padapter->pnetdev->br_port;
#else	// (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
	rcu_read_lock();
	br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
	rcu_read_unlock();
#endif	// (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))

	if( br_port	&& (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) )
	{
		int nat25_handle_frame(_adapter *priv, struct sk_buff *skb);
		if (nat25_handle_frame(padapter, skb) == -1) {
			//priv->ext_stats.rx_data_drops++;
			//DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n");
			//return FAIL;

#if 1
			// bypass this frame to upper layer!!
#else
			goto _recv_indicatepkt_drop;
#endif
		}
	}
#endif	// CONFIG_BR_EXT

#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
	if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) ) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		//DBG_871X("CHECKSUM_UNNECESSARY \n");
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		//DBG_871X("CHECKSUM_NONE(%d, %d) \n", pattrib->tcpchk_valid, pattrib->tcp_chkrpt);
	}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
	skb->ip_summed = CHECKSUM_NONE;
#endif

	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);

#ifdef DBG_TRX_STA_PKTS
	{
		/* per-station, per-AC receive counters for debugging */
		struct sta_info *psta = NULL;
		struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
		int bmcast = IS_MCAST(pattrib->dst);

		if(bmcast)
		{
			psta = rtw_get_bcmc_stainfo(padapter);
		}
		else
		{
			psta = rtw_get_stainfo(pstapriv, pattrib->src);
		}
		if(psta)
		{
			switch(pattrib->priority)
			{
				case 1:
				case 2:
					psta->rx_bk_cnt++;
					break;
				case 4:
				case 5:
					psta->rx_vi_cnt++;
					break;
				case 6:
				case 7:
					psta->rx_vo_cnt++;
					break;
				case 0:
				case 3:
				default:
					psta->rx_be_cnt++;
					break;
			}
		}
	}
#endif

	rtw_netif_rx(padapter->pnetdev, skb);

_recv_indicatepkt_end:

	precv_frame->u.hdr.pkt = NULL; // pointers to NULL before rtw_free_recvframe()

	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n rtw_recv_indicatepkt :after rtw_netif_rx!!!!\n"));

_func_exit_;

	return _SUCCESS;

_recv_indicatepkt_drop:

	//enqueue back to free_recv_queue
	if(precv_frame)
		rtw_free_recvframe(precv_frame, pfree_recv_queue);

	return _FAIL;

	/* NOTE(review): this trailing macro is unreachable (after return) */
_func_exit_;

}
/* Called in soft-irq context */ static void smd_net_data_handler(unsigned long arg) { struct net_device *dev = (struct net_device *) arg; struct rmnet_private *p = netdev_priv(dev); struct sk_buff *skb; void *ptr = 0; int sz; u32 opmode = p->operation_mode; unsigned long flags; for (;;) { sz = smd_cur_packet_size(p->ch); if (sz == 0) break; if (smd_read_avail(p->ch) < sz) break; if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) : (sz > (dev->mtu + ETH_HLEN))) { pr_err("rmnet_recv() discarding %d len (%d mtu)\n", sz, RMNET_IS_MODE_IP(opmode) ? dev->mtu : (dev->mtu + ETH_HLEN)); ptr = 0; } else { skb = dev_alloc_skb(sz + NET_IP_ALIGN); if (skb == NULL) { pr_err("rmnet_recv() cannot allocate skb\n"); } else { skb->dev = dev; skb_reserve(skb, NET_IP_ALIGN); ptr = skb_put(skb, sz); wake_lock_timeout(&p->wake_lock, HZ / 2); if (smd_read(p->ch, ptr, sz) != sz) { pr_err("rmnet_recv() smd lied about avail?!"); ptr = 0; dev_kfree_skb_irq(skb); } else { /* Handle Rx frame format */ spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_IP(opmode)) { /* Driver in IP mode */ skb->protocol = rmnet_ip_type_trans(skb, dev); } else { /* Driver in Ethernet mode */ skb->protocol = eth_type_trans(skb, dev); } if (RMNET_IS_MODE_IP(opmode) || count_this_packet(ptr, skb->len)) { #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } netif_rx(skb); } continue; } } if (smd_read(p->ch, ptr, sz) != sz) pr_err("rmnet_recv() smd lied about avail?!"); } }
/* Incoming data */ static void zd1201_usbrx(struct urb *urb) { struct zd1201 *zd = urb->context; int free = 0; unsigned char *data = urb->transfer_buffer; struct sk_buff *skb; unsigned char type; if (!zd) return; switch(urb->status) { case -EILSEQ: case -ENODEV: case -ETIME: case -ENOENT: case -EPIPE: case -EOVERFLOW: case -ESHUTDOWN: dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n", zd->dev->name, urb->status); free = 1; goto exit; } if (urb->status != 0 || urb->actual_length == 0) goto resubmit; type = data[0]; if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) { memcpy(zd->rxdata, data, urb->actual_length); zd->rxlen = urb->actual_length; zd->rxdatas = 1; wake_up(&zd->rxdataq); } /* Info frame */ if (type == ZD1201_PACKET_INQUIRE) { int i = 0; unsigned short infotype, framelen, copylen; framelen = le16_to_cpu(*(__le16*)&data[4]); infotype = le16_to_cpu(*(__le16*)&data[6]); if (infotype == ZD1201_INF_LINKSTATUS) { short linkstatus; linkstatus = le16_to_cpu(*(__le16*)&data[8]); switch(linkstatus) { case 1: netif_carrier_on(zd->dev); break; case 2: netif_carrier_off(zd->dev); break; case 3: netif_carrier_off(zd->dev); break; case 4: netif_carrier_on(zd->dev); break; default: netif_carrier_off(zd->dev); } goto resubmit; } if (infotype == ZD1201_INF_ASSOCSTATUS) { short status = le16_to_cpu(*(__le16*)(data+8)); int event; union iwreq_data wrqu; switch (status) { case ZD1201_ASSOCSTATUS_STAASSOC: case ZD1201_ASSOCSTATUS_REASSOC: event = IWEVREGISTERED; break; case ZD1201_ASSOCSTATUS_DISASSOC: case ZD1201_ASSOCSTATUS_ASSOCFAIL: case ZD1201_ASSOCSTATUS_AUTHFAIL: default: event = IWEVEXPIRED; } memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* Send event to user space */ wireless_send_event(zd->dev, event, &wrqu, NULL); goto resubmit; } if (infotype == ZD1201_INF_AUTHREQ) { union iwreq_data wrqu; memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* There isn't a event that trully fits 
this request. We assume that userspace will be smart enough to see a new station being expired and sends back a authstation ioctl to authorize it. */ wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL); goto resubmit; } /* Other infotypes are handled outside this handler */ zd->rxlen = 0; while (i < urb->actual_length) { copylen = le16_to_cpu(*(__le16*)&data[i+2]); /* Sanity check, sometimes we get junk */ if (copylen+zd->rxlen > sizeof(zd->rxdata)) break; memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen); zd->rxlen += copylen; i += 64; } if (i >= urb->actual_length) { zd->rxdatas = 1; wake_up(&zd->rxdataq); } goto resubmit; } /* Actual data */ if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) { int datalen = urb->actual_length-1; unsigned short len, fc, seq; struct hlist_node *node; len = ntohs(*(__be16 *)&data[datalen-2]); if (len>datalen) len=datalen; fc = le16_to_cpu(*(__le16 *)&data[datalen-16]); seq = le16_to_cpu(*(__le16 *)&data[datalen-24]); if (zd->monitor) { if (datalen < 24) goto resubmit; if (!(skb = dev_alloc_skb(datalen+24))) goto resubmit; memcpy(skb_put(skb, 2), &data[datalen-16], 2); memcpy(skb_put(skb, 2), &data[datalen-2], 2); memcpy(skb_put(skb, 6), &data[datalen-14], 6); memcpy(skb_put(skb, 6), &data[datalen-22], 6); memcpy(skb_put(skb, 6), &data[datalen-8], 6); memcpy(skb_put(skb, 2), &data[datalen-24], 2); memcpy(skb_put(skb, len), data, len); skb->protocol = eth_type_trans(skb, zd->dev); zd->dev->stats.rx_packets++; zd->dev->stats.rx_bytes += skb->len; netif_rx(skb); goto resubmit; } if ((seq & IEEE80211_SCTL_FRAG) || (fc & IEEE80211_FCTL_MOREFRAGS)) { struct zd1201_frag *frag = NULL; char *ptr; if (datalen<14) goto resubmit; if ((seq & IEEE80211_SCTL_FRAG) == 0) { frag = kmalloc(sizeof(*frag), GFP_ATOMIC); if (!frag) goto resubmit; skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2); if (!skb) { kfree(frag); goto resubmit; } frag->skb = skb; frag->seq = seq & IEEE80211_SCTL_SEQ; skb_reserve(skb, 2); memcpy(skb_put(skb, 12), 
&data[datalen-14], 12); memcpy(skb_put(skb, 2), &data[6], 2); memcpy(skb_put(skb, len), data+8, len); hlist_add_head(&frag->fnode, &zd->fraglist); goto resubmit; } hlist_for_each_entry(frag, node, &zd->fraglist, fnode) if (frag->seq == (seq&IEEE80211_SCTL_SEQ)) break; if (!frag) goto resubmit; skb = frag->skb; ptr = skb_put(skb, len); if (ptr) memcpy(ptr, data+8, len); if (fc & IEEE80211_FCTL_MOREFRAGS) goto resubmit; hlist_del_init(&frag->fnode); kfree(frag); } else { if (datalen<14)