/* Passes this packet up the stack, updating its accounting. * Some link protocols batch packets, so their rx_fixup paths * can return clones as well as just modify the original skb. */ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) { int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { skb_queue_tail(&dev->rxq_pause, skb); return; } if (!skb->protocol) skb->protocol = eth_type_trans(skb, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb->len; netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", skb->len + sizeof (struct ethhdr), skb->protocol); memset (skb->cb, 0, sizeof (struct skb_data)); status = netif_rx_ni(skb); if (status != NET_RX_SUCCESS) netif_dbg(dev, rx_err, dev->net, "netif_rx status %d\n", status); }
/*
 * cfhsi_rx_desc() - parse a just-received CFHSI descriptor.
 * @desc:  descriptor read from the HSI link.
 * @cfhsi: per-device state.
 *
 * Delivers any embedded CAIF frame to the network stack and computes the
 * size of the payload transfer that follows the descriptor.
 *
 * Returns the expected payload transfer length in bytes (may be 0),
 * -EPROTO on a malformed descriptor, or -ENOMEM on allocation failure.
 */
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Only the piggy-back bit may be set in the header, and the
	 * embedded-frame offset must lie within the embedded frame area. */
	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from a arch specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		cfhsi_notify_rx(cfhsi);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length: sum per-frame lengths until a zero
	 * terminator or the frame-count limit. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	/* Payload must be word aligned and fit in the RX buffer. */
	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX-CFHSI_DESC_SZ))) {
		dev_err(&cfhsi->ndev->dev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}
/* Send one completely decapsulated can_frame to the network layer.
 *
 * Parses the ASCII SLCAN frame in sl->rbuff ('t'/'T' data frames,
 * 'r'/'R' RTR frames; upper case = 29-bit extended ID), builds a
 * struct can_frame and delivers it via netif_rx_ni(). Malformed input
 * is silently dropped (early return).
 */
static void slc_bump(struct slcan *sl)
{
	struct sk_buff *skb;
	struct can_frame cf;
	int i, tmp;
	u32 tmpid;
	char *cmd = sl->rbuff;

	cf.can_id = 0;

	switch (*cmd) {
	case 'r':
		cf.can_id = CAN_RTR_FLAG;
		/* fallthrough */
	case 't':
		/* store dlc ASCII value and terminate SFF CAN ID string */
		cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
		sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
		/* point to payload data behind the dlc */
		cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
		break;
	case 'R':
		cf.can_id = CAN_RTR_FLAG;
		/* fallthrough */
	case 'T':
		cf.can_id |= CAN_EFF_FLAG;
		/* store dlc ASCII value and terminate EFF CAN ID string */
		cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
		sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
		/* point to payload data behind the dlc */
		cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
		break;
	default:
		return;
	}

	/* ID string was NUL-terminated above; parse it as hex. */
	if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
		return;

	cf.can_id |= tmpid;

	/* get can_dlc from sanitized ASCII value ('0'..'8' only) */
	if (cf.can_dlc >= '0' && cf.can_dlc < '9')
		cf.can_dlc -= '0';
	else
		return;

	*(u64 *) (&cf.data) = 0; /* clear payload */

	/* RTR frames may have a dlc > 0 but they never have any data bytes */
	if (!(cf.can_id & CAN_RTR_FLAG)) {
		/* Two hex digits per payload byte. */
		for (i = 0; i < cf.can_dlc; i++) {
			tmp = hex_to_bin(*cmd++);
			if (tmp < 0)
				return;
			cf.data[i] = (tmp << 4);
			tmp = hex_to_bin(*cmd++);
			if (tmp < 0)
				return;
			cf.data[i] |= tmp;
		}
	}

	skb = dev_alloc_skb(sizeof(struct can_frame) +
			    sizeof(struct can_skb_priv));
	if (!skb)
		return;

	skb->dev = sl->dev;
	skb->protocol = htons(ETH_P_CAN);
	skb->pkt_type = PACKET_BROADCAST;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = sl->dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	memcpy(skb_put(skb, sizeof(struct can_frame)),
	       &cf, sizeof(struct can_frame));

	sl->dev->stats.rx_packets++;
	sl->dev->stats.rx_bytes += cf.can_dlc;
	netif_rx_ni(skb);
}
/**============================================================================ @brief hdd_rx_packet_cbk() - Receive callback registered with TL. TL will call this to notify the HDD when one or more packets were received for a registered STA. @param vosContext : [in] pointer to VOS context @param pVosPacketChain : [in] pointer to VOS packet chain @param staId : [in] Station Id @param pRxMetaInfo : [in] pointer to meta info for the received pkt(s) @return : VOS_STATUS_E_FAILURE if any errors encountered, : VOS_STATUS_SUCCESS otherwise ===========================================================================*/ VOS_STATUS hdd_rx_packet_cbk( v_VOID_t *vosContext, vos_pkt_t *pVosPacketChain, v_U8_t staId, WLANTL_RxMetaInfoType* pRxMetaInfo ) { hdd_adapter_t *pAdapter = NULL; hdd_context_t *pHddCtx = NULL; VOS_STATUS status = VOS_STATUS_E_FAILURE; int rxstat; struct sk_buff *skb = NULL; vos_pkt_t* pVosPacket; vos_pkt_t* pNextVosPacket; //Sanity check on inputs if ( ( NULL == vosContext ) || ( NULL == pVosPacketChain ) || ( NULL == pRxMetaInfo ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: Null params being passed", __FUNCTION__); return VOS_STATUS_E_FAILURE; } pHddCtx = (hdd_context_t *)vos_get_context( VOS_MODULE_ID_HDD, vosContext ); if ( NULL == pHddCtx ) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: HDD adapter context is Null", __FUNCTION__); return VOS_STATUS_E_FAILURE; } pAdapter = pHddCtx->sta_to_adapter[staId]; if( NULL == pAdapter ) { VOS_ASSERT(0); return VOS_STATUS_E_FAILURE; } ++pAdapter->hdd_stats.hddTxRxStats.rxChains; // walk the chain until all are processed pVosPacket = pVosPacketChain; do { // get the pointer to the next packet in the chain // (but don't unlink the packet since we free the entire chain later) status = vos_pkt_walk_packet_chain( pVosPacket, &pNextVosPacket, VOS_FALSE); // both "success" and "empty" are acceptable results if (!((status == VOS_STATUS_SUCCESS) || (status == VOS_STATUS_E_EMPTY))) { 
++pAdapter->hdd_stats.hddTxRxStats.rxDropped; VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: Failure walking packet chain", __FUNCTION__); return VOS_STATUS_E_FAILURE; } // Extract the OS packet (skb). // Tell VOS to detach the OS packet from the VOS packet status = vos_pkt_get_os_packet( pVosPacket, (v_VOID_t **)&skb, VOS_TRUE ); if(!VOS_IS_STATUS_SUCCESS( status )) { ++pAdapter->hdd_stats.hddTxRxStats.rxDropped; VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: Failure extracting skb from vos pkt", __FUNCTION__); return VOS_STATUS_E_FAILURE; } if (WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic) { VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "Magic cookie(%x) for adapter sanity verification is invalid", pAdapter->magic); return eHAL_STATUS_FAILURE; } skb->dev = pAdapter->dev; skb->protocol = eth_type_trans(skb, skb->dev); skb->ip_summed = CHECKSUM_NONE; ++pAdapter->hdd_stats.hddTxRxStats.rxPackets; ++pAdapter->stats.rx_packets; pAdapter->stats.rx_bytes += skb->len; #ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK wake_lock_timeout(&pHddCtx->rx_wake_lock, HDD_WAKE_LOCK_DURATION); #endif rxstat = netif_rx_ni(skb); if (NET_RX_SUCCESS == rxstat) { ++pAdapter->hdd_stats.hddTxRxStats.rxDelivered; } else { ++pAdapter->hdd_stats.hddTxRxStats.rxRefused; } // now process the next packet in the chain pVosPacket = pNextVosPacket; } while (pVosPacket); //Return the entire VOS packet chain to the resource pool status = vos_pkt_return_packet( pVosPacketChain ); if(!VOS_IS_STATUS_SUCCESS( status )) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: Failure returning vos pkt", __FUNCTION__); } pAdapter->dev->last_rx = jiffies; return status; }
/*
 * brcmf_rx_frames() - deliver a batch of received frames upstream.
 * @dev:      bus device whose drvdata holds the brcmf bus/public state.
 * @skb_list: list of received skbs; each is unlinked and either freed or
 *            handed to the network stack.
 */
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
{
	unsigned char *eth;
	uint len;
	struct sk_buff *skb, *pnext;
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	u8 ifidx;
	int ret;

	brcmf_dbg(TRACE, "Enter\n");

	skb_queue_walk_safe(skb_list, skb, pnext) {
		skb_unlink(skb, skb_list);

		/* process and remove protocol-specific header */
		ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
		ifp = drvr->iflist[ifidx];

		/* Drop frames with a bad header or no usable interface;
		 * -ENODATA means "no data, not an error" so don't count it. */
		if (ret || !ifp || !ifp->ndev) {
			if ((ret != -ENODATA) && ifp)
				ifp->stats.rx_errors++;
			brcmu_pkt_buf_free_skb(skb);
			continue;
		}

		/* Get the protocol, maintain skb around eth_type_trans()
		 * The main reason for this hack is for the limitation of
		 * Linux 2.4 where 'eth_type_trans' uses the
		 * 'net->hard_header_len'
		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
		 * coping of the packet coming from the network stack to add
		 * BDC, Hardware header etc, during network interface
		 * registration
		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space
		 * required
		 * for BDC, Hardware header etc. and not just the ETH_HLEN
		 */
		eth = skb->data;
		len = skb->len;

		skb->dev = ifp->ndev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (skb->pkt_type == PACKET_MULTICAST)
			ifp->stats.multicast++;

		/* Undo eth_type_trans()'s pull (see comment above). */
		skb->data = eth;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Process special event packets */
		brcmf_fweh_process_skb(drvr, skb);

		if (!(ifp->ndev->flags & IFF_UP)) {
			brcmu_pkt_buf_free_skb(skb);
			continue;
		}

		ifp->stats.rx_bytes += skb->len;
		ifp->stats.rx_packets++;

		if (in_interrupt())
			netif_rx(skb);
		else
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service
			 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
			 * by netif_rx_ni(), but in earlier kernels, we need
			 * to do it manually.
			 */
			netif_rx_ni(skb);
	}
	/* NOTE(review): the function's closing brace is not visible in this
	 * chunk -- the definition appears truncated here; verify against the
	 * original file. */
/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 *  ch		The channel where this skb has been received.
 *  pskb	The received skb.
 *
 * The inbound buffer may carry several LL-framed packets back to back:
 * a 2-byte total length, then per-packet ll_header + payload. Each
 * payload is copied into a fresh skb and delivered via netif_rx_ni().
 * Any malformed packet aborts processing of the remainder of the buffer.
 */
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 len = *((__u16 *) pskb->data);

	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		int skblen;
		struct ll_header *header = (struct ll_header *)pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTCM_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
				/*
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet type 0x%04x"
					" - dropping",
					CTCM_FUNTAIL, dev->name, header->type);
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		if ((header->length <= LL_HEADER_LENGTH) ||
		    (len <= LL_HEADER_LENGTH)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet size %d(%d,%d)"
					"- dropping",
					CTCM_FUNTAIL, dev->name,
					header->length, dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		if ((header->length > skb_tailroom(pskb)) ||
		    (header->length > len)) {
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Packet size %d (overrun)"
					" - dropping",
					CTCM_FUNTAIL, dev->name,
					header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		skb_reset_mac_header(pskb);
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): MEMORY allocation error",
					CTCM_FUNTAIL, dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
			}
			priv->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): ip_summed is set on pskb here rather than on
		 * the newly allocated skb -- this matches the historical code
		 * but looks suspicious; confirm against upstream. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		skblen = skb->len;
		/*
		 * reset logflags
		 */
		ch->logflags = 0;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += skblen;
		netif_rx_ni(skb);
		if (len > 0) {
			/* Advance to the next LL frame within pskb. */
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
				CTCM_DBF_DEV_NAME(TRACE, dev,
					"Overrun in ctcm_unpack_skb");
				ch->logflags |= LOG_FLAG_OVERRUN;
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
/*
 * xenvif_receive_skb() - hand one received skb to the network stack.
 * @vif: receiving virtual interface (unused here; kept for the call
 *       signature shared with other backends).
 * @skb: fully prepared packet to deliver.
 */
void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
{
	netif_rx_ni(skb); /* process context, so the _ni variant */
}
/*----------------------------------------------------------------
* p80211netdev_rx_bh
*
* Deferred processing of all received frames.
*
* Drains wlandev->nsd_rxq. Raw (non-Ethernet) frames are delivered
* as-is with protocol ETH_P_80211_RAW; 802.11 frames are type-filtered,
* multicast-filtered (when IFF_ALLMULTI), converted to Ethernet and
* delivered. Anything that cannot be delivered is freed.
*
* Arguments:
*	wlandev		WLAN network device structure
*	skb		skbuff containing a full 802.11 frame.
* Returns:
*	nothing
* Side effects:
*
----------------------------------------------------------------*/
static void p80211netdev_rx_bh(unsigned long arg)
{
	wlandevice_t *wlandev = (wlandevice_t *) arg;
	struct sk_buff *skb = NULL;
	netdevice_t *dev = wlandev->netdev;
	p80211_hdr_a3_t *hdr;
	UINT16 fc;

	DBFENTER;

	/* Let's empty our our queue */
	while ( (skb = skb_dequeue(&wlandev->nsd_rxq)) ) {
		if (wlandev->state == WLAN_DEVICE_OPEN) {

			if (dev->type != ARPHRD_ETHER) {
				/* RAW frame; we shouldn't convert it */
				// XXX Append the Prism Header here instead.

				/* set up various data fields */
				skb->dev = dev;
				skb_reset_mac_header(skb);
				skb->ip_summed = CHECKSUM_NONE;
				skb->pkt_type = PACKET_OTHERHOST;
				skb->protocol = htons(ETH_P_80211_RAW);
				dev->last_rx = jiffies;

				wlandev->linux_stats.rx_packets++;
				wlandev->linux_stats.rx_bytes += skb->len;
				netif_rx_ni(skb);
				continue;
			} else {
				hdr = (p80211_hdr_a3_t *)skb->data;
				fc = ieee2host16(hdr->fc);
				/* Drop frame types this device isn't interested in. */
				if (p80211_rx_typedrop(wlandev, fc)) {
					dev_kfree_skb(skb);
					continue;
				}

				/* perform mcast filtering */
				if (wlandev->netdev->flags & IFF_ALLMULTI) {
					/* allow my local address through */
					if (memcmp(hdr->a1, wlandev->netdev->dev_addr, WLAN_ADDR_LEN) != 0) {
						/* but reject anything else that isn't multicast */
						if (!(hdr->a1[0] & 0x01)) {
							dev_kfree_skb(skb);
							continue;
						}
					}
				}

				/* Convert 802.11 to Ethernet; 0 == success. */
				if ( skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0 ) {
					skb->dev->last_rx = jiffies;
					wlandev->linux_stats.rx_packets++;
					wlandev->linux_stats.rx_bytes += skb->len;
					netif_rx_ni(skb);
					continue;
				}
				WLAN_LOG_DEBUG(1, "p80211_to_ether failed.\n");
			}
		}
		/* Device closed or conversion failed: drop. */
		dev_kfree_skb(skb);
	}

	DBFEXIT;
}
/*
 * ccmni_rx_callback() - deliver one modem RX skb to the network stack.
 * @md_id:     modem index (selects the control block).
 * @rx_ch:     CCCI receive channel the skb arrived on.
 * @skb:       packet payload (raw IP, no Ethernet header).
 * @priv_data: unused.
 *
 * Fakes an Ethernet header in the headroom, sets the protocol from the
 * IP version nibble, then delivers via NAPI/netif_rx(_ni) depending on
 * modem capability and context. Returns 0 on delivery, -1 on a bad
 * control block or channel (skb freed).
 */
static int ccmni_rx_callback(int md_id, int rx_ch, struct sk_buff *skb, void *priv_data)
{
	ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
	// struct ccci_header *ccci_h = (struct ccci_header*)skb->data;
	ccmni_instance_t *ccmni = NULL;
	struct net_device *dev = NULL;
	int pkt_type, skb_len, ccmni_idx;

	if (unlikely(ctlb == NULL || ctlb->ccci_ops == NULL)) {
		CCMNI_ERR_MSG(md_id, "invalid CCMNI ctrl/ops struct for RX_CH(%d)\n", rx_ch);
		dev_kfree_skb(skb);
		return -1;
	}

	ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch);
	if (unlikely(ccmni_idx < 0)) {
		CCMNI_ERR_MSG(md_id, "CCMNI rx(%d) skb ch error\n", rx_ch);
		dev_kfree_skb(skb);
		return -1;
	}

	ccmni = ctlb->ccmni_inst[ccmni_idx];
	dev = ccmni->dev;

	// skb_pull(skb, sizeof(struct ccci_header));
	/* High nibble of the first payload byte is the IP version. */
	pkt_type = skb->data[0] & 0xF0;
	ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, pkt_type);
	skb_set_mac_header(skb, -ETH_HLEN);

	skb->dev = dev;
	if(pkt_type == 0x60) {
		skb->protocol = htons(ETH_P_IPV6);
	} else {
		skb->protocol = htons(ETH_P_IP);
	}
	skb->ip_summed = CHECKSUM_NONE;
	/* Snapshot the length now: the skb must not be touched after it is
	 * handed to the stack below. */
	skb_len = skb->len;

	if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX)) {
		CCMNI_INF_MSG(md_id, "[RX]CCMNI%d(rx_ch=%d) recv data_len=%d\n", ccmni_idx, rx_ch, skb->len);
	}
	if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX_SKB)) {
		ccmni_dbg_skb_header(ccmni->md_id, false, skb);
	}

	if(likely(ctlb->ccci_ops->md_ability & MODEM_CAP_NAPI)) {
		netif_receive_skb(skb);
	} else {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
		if(!in_interrupt()) {
			netif_rx_ni(skb);
		} else {
			netif_rx(skb);
		}
#else
		netif_rx(skb);
#endif
	}

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb_len;

	/* Keep the system awake briefly so the packet gets processed. */
	wake_lock_timeout(&ctlb->ccmni_wakelock, HZ);

	return 0;
}
/*
 * omapl_pru_can_rx() - read one received CAN frame from a PRU mailbox
 * and deliver it to the network stack.
 * @ndev:  CAN network device.
 * @mbxno: mailbox number holding the received frame.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated, or
 * -EAGAIN if the mailbox data could not be read.
 *
 * Fixes vs. previous version:
 *  - the mailbox-read failure path returned positive EAGAIN and leaked
 *    the already-allocated skb; it now frees the skb and returns -EAGAIN;
 *  - statistics are updated before netif_rx_ni(), since cf points into
 *    the skb data which must not be touched after hand-off to the stack.
 */
static int omapl_pru_can_rx(struct net_device *ndev, u32 mbxno)
{
	struct omapl_pru_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 pru_can_mbx_data;
	u8 *data = NULL;
	u8 *ptr8data = NULL;
	int count = 0;

	skb = alloc_can_skb(ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_pru_can_rx_pkt: alloc_can_skb() failed\n");
		return -ENOMEM;
	}
	data = cf->data;

	/* get payload */
	priv->can_rx_hndl.ecanmailboxnumber = (can_mailbox_number) mbxno;
	if (pru_can_get_data_from_mailbox(&priv->can_rx_hndl)) {
		__can_err("pru_can_get_data_from_mailbox: failed\n");
		kfree_skb(skb);		/* don't leak the allocated skb */
		return -EAGAIN;		/* negative errno convention */
	}
	/* give ownership back to pru */
	pru_can_transfer(mbxno, CAN_RX_PRU_0);

	/* get data length code */
	cf->can_dlc = get_can_dlc(*
				  ((u32 *) & priv->can_rx_hndl.strcanmailbox.
				   u16datalength) & 0xF);

	/* Payload bytes are packed big-endian style across the two mailbox
	 * data words; copy up to 4 bytes from the first, rest from second. */
	if (cf->can_dlc <= 4) {
		ptr8data = &priv->can_rx_hndl.strcanmailbox.u8data3 +
		    (4 - cf->can_dlc);
		for (count = 0; count < cf->can_dlc; count++) {
			*data++ = *ptr8data++;
		}
	} else {
		ptr8data = &priv->can_rx_hndl.strcanmailbox.u8data3;
		for (count = 0; count < 4; count++) {
			*data++ = *ptr8data++;
		}
		ptr8data = &priv->can_rx_hndl.strcanmailbox.u8data4 -
		    (cf->can_dlc - 5);
		for (count = 0; count < cf->can_dlc - 4; count++) {
			*data++ = *ptr8data++;
		}
	}

	pru_can_mbx_data = *((u32 *) & priv->can_rx_hndl.strcanmailbox);

	/* get id extended or std */
	if (pru_can_mbx_data & PRU_CANMID_IDE)
		cf->can_id = (pru_can_mbx_data & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (pru_can_mbx_data >> 18) & CAN_SFF_MASK;

	if (pru_can_mbx_data & CAN_RTR_FLAG)
		cf->can_id |= CAN_RTR_FLAG;

	/* Update stats before the skb (and cf, which aliases its data) is
	 * handed to the network stack. */
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx_ni(skb);

	return 0;
}
/*
 * Prepends an ISI header and sends a datagram.
 *
 * @skb: payload; consumed on all paths (freed on error, queued on success).
 * @dev: outgoing network device.
 * @dst/@src: Phonet destination/source addresses (device + object).
 * @res: resource/function id for the header.
 * @irq: non-zero when called from interrupt context (selects netif_rx()
 *       over netif_rx_ni() for the loopback path).
 *
 * Returns 0 on success or a negative errno (-EMSGSIZE, -EOPNOTSUPP,
 * -ENOBUFS, -EHOSTUNREACH, or a dev_queue_xmit() error).
 */
static int pn_send(struct sk_buff *skb, struct net_device *dev,
			u16 dst, u16 src, u8 res, u8 irq)
{
	struct phonethdr *ph;
	int err, i;

	if (skb->len + 2 > 0xffff /* Phonet length field limit */ ||
			skb->len + sizeof(struct phonethdr) > dev->mtu) {
		err = -EMSGSIZE;
		goto drop;
	}

	/* Broadcast sending is not implemented */
	if (pn_addr(dst) == PNADDR_BROADCAST) {
		err = -EOPNOTSUPP;
		goto drop;
	}

	skb_reset_transport_header(skb);
	WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
	skb_push(skb, sizeof(struct phonethdr));
	skb_reset_network_header(skb);
	ph = pn_hdr(skb);
	ph->pn_rdev = pn_dev(dst);
	ph->pn_sdev = pn_dev(src);
	ph->pn_res = res;
	ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
	ph->pn_robj = pn_obj(dst);
	ph->pn_sobj = pn_obj(src);

	skb->protocol = htons(ETH_P_PHONET);
	skb->priority = 0;
	skb->dev = dev;

	PN_PRINTK("pn_send rdev %x sdev %x res %x robj %x sobj %x netdev=%s\n",
		ph->pn_rdev, ph->pn_sdev, ph->pn_res, ph->pn_robj, ph->pn_sobj, dev->name);
	PN_DATA_PRINTK("PHONET : skb  data = %d\nPHONET :", skb->len);
	/* Hex-dump of the outgoing datagram, 8 bytes per line. */
	for (i = 1; i <= skb->len; i++) {
		PN_DATA_PRINTK(" %02x", skb->data[i-1]);
		if ((i%8) == 0)
			PN_DATA_PRINTK("\n");
	}

	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb_reset_mac_header(skb);
		skb_orphan(skb);
		/* Loopback: feed straight back into local RX. */
		err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0;
	} else {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					NULL, NULL, skb->len);
		if (err < 0) {
			err = -EHOSTUNREACH;
			goto drop;
		}
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0))
			err = net_xmit_errno(err);
	}

	return err;

drop:
	printk(KERN_DEBUG "pn_send DROP\n");
	kfree_skb(skb);
	return err;
}
/**
 * \fn     wlanDrvIf_receivePacket
 * \brief  Receive packet from from lower level
 *
 * Recovers the skb that wraps the received descriptor, charges RX
 * statistics, and then either forwards the packet to the network stack,
 * bridges it back to the air (unicast), or does both (broadcast /
 * multicast, via skb_clone).
 *
 * Returns TI_TRUE on success, TI_FALSE if the broadcast clone failed.
 */
TI_BOOL wlanDrvIf_receivePacket(TI_HANDLE OsContext, void *pRxDesc ,void *pPacket, TI_UINT16 Length, TIntraBssBridge *pBridgeDecision)
{
	TWlanDrvIfObj  *drv = (TWlanDrvIfObj *)OsContext;
	/* Descriptor address is word-aligned down to recover the rx_head
	 * placed in front of the buffer by the allocator. */
	unsigned char  *pdata = (unsigned char *)((TI_UINT32)pRxDesc & ~(TI_UINT32)0x3);
	rx_head_t      *rx_head = (rx_head_t *)(pdata -  WSPI_PAD_BYTES - RX_HEAD_LEN_ALIGNED);
	struct sk_buff *skb = rx_head->skb;
	struct sk_buff *new_skb;
	EIntraBssBridgeDecision eBridge = INTRA_BSS_BRIDGE_NO_BRIDGE;

	skb->data = pPacket;
	skb_put(skb, Length);
	skb->dev = drv->netdev;

	drv->stats.rx_packets++;
	drv->stats.rx_bytes += skb->len;

	/* Intra BSS bridge section */
	if(pBridgeDecision != NULL)
	{
		eBridge = pBridgeDecision->eDecision;
	}
	if(INTRA_BSS_BRIDGE_NO_BRIDGE == eBridge)
	{
		/* Forward packet to network stack*/
		CL_TRACE_START_L1();
		skb->protocol = eth_type_trans(skb, drv->netdev);
		skb->ip_summed = CHECKSUM_NONE;
		netif_rx_ni(skb);

		/* Note: Don't change this trace (needed to exclude OS processing from Rx CPU utilization) */
		CL_TRACE_END_L1("tiwlan_drv.ko", "OS", "RX", "");
	}
	else if( INTRA_BSS_BRIDGE_UNICAST == eBridge)
	{
		/* Send packet to Tx */
		TRACE2(drv->tCommon.hReport, REPORT_SEVERITY_WARNING, " wlanDrvIf_receivePacket() Unicast Bridge data=0x%x len=%d \n", RX_ETH_PKT_DATA(pPacket), RX_ETH_PKT_LEN(pPacket));
		xmit_Bridge (skb, pDrvStaticHandle->netdev, pBridgeDecision);
	}
	else /* Broadcast/Multicast packet*/
	{
		/* Duplicate packet - clone BEFORE eth_type_trans() pulls the
		 * Ethernet header from the original. */
		new_skb = skb_clone(skb, GFP_ATOMIC);
		skb->protocol = eth_type_trans(skb, drv->netdev);
		skb->ip_summed = CHECKSUM_NONE;
		netif_rx_ni(skb);
		if(new_skb)
		{
			xmit_Bridge (new_skb, pDrvStaticHandle->netdev, pBridgeDecision);
		}
		else
		{
			printk (KERN_ERR "%s: skb_clone failed\n", __FUNCTION__);
			return TI_FALSE;
		}
	}
	return TI_TRUE;
}
/* * RX: normal working mode */ static void kni_net_rx_normal(struct kni_dev *kni) { unsigned ret; uint32_t len; unsigned i, num_rx, num_fq; struct rte_kni_mbuf *kva; struct rte_kni_mbuf *va[MBUF_BURST_SZ]; void * data_kva; struct sk_buff *skb; struct net_device *dev = kni->net_dev; /* Get the number of free entries in free_q */ num_fq = kni_fifo_free_count(kni->free_q); if (num_fq == 0) { /* No room on the free_q, bail out */ return; } /* Calculate the number of entries to dequeue from rx_q */ num_rx = min(num_fq, (unsigned)MBUF_BURST_SZ); /* Burst dequeue from rx_q */ num_rx = kni_fifo_get(kni->rx_q, (void **)va, num_rx); if (num_rx == 0) return; /* Transfer received packets to netif */ for (i = 0; i < num_rx; i++) { kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva; len = kva->data_len; data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva; skb = dev_alloc_skb(len + 2); if (!skb) { KNI_ERR("Out of mem, dropping pkts\n"); /* Update statistics */ kni->stats.rx_dropped++; } else { /* Align IP on 16B boundary */ skb_reserve(skb, 2); memcpy(skb_put(skb, len), data_kva, len); skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_UNNECESSARY; /* Call netif interface */ netif_rx_ni(skb); /* Update statistics */ kni->stats.rx_bytes += len; kni->stats.rx_packets++; } } /* Burst enqueue mbufs into free_q */ ret = kni_fifo_put(kni->free_q, (void **)va, num_rx); if (ret != num_rx) /* Failing should not happen */ KNI_ERR("Fail to enqueue entries into free_q\n"); }
static void cyrf6936_rx_tx(struct cyrf6936_net *p) { u8 val, *data; size_t rx_len; struct sk_buff *skb; /* update signal level */ cyrf6936_iw_rssi(p); /* rx */ val = cyrf6936_rreg(p, RX_IRQ_STATUS); if (val & RXE_IRQ) goto err_rxe; if (val & RXC_IRQ) { /* debouncing, 2nd read */ val = cyrf6936_rreg(p, RX_IRQ_STATUS); if (val & RXE_IRQ) goto err_rxe; /* get data length*/ rx_len = cyrf6936_rreg(p, RX_LENGTH); /* allocate buffer */ skb = dev_alloc_skb(rx_len + NET_IP_ALIGN); if (!skb) goto err_oom; skb_reserve(skb, NET_IP_ALIGN); data = skb_put(skb, rx_len); /* read data */ cyrf6936_wreg(p, RX_IRQ_STATUS, RXOW_IRQ); while (rx_len--) *data++ = cyrf6936_rreg(p, RX_BUFFER); /* dump received packet data */ if (netif_msg_pktdata(p)) print_hex_dump_bytes("cyrf6936 rx data: ", DUMP_PREFIX_NONE, skb->data, skb->len); skb->dev = p->netdev; skb->protocol = htons(ETH_P_ALL); skb->ip_summed = CHECKSUM_UNNECESSARY; p->stats.rx_packets++; p->stats.rx_bytes += rx_len; netif_rx_ni(skb); cyrf6936_rx_enable(p); } /* tx */ val = cyrf6936_rreg(p, TX_IRQ_STATUS); if (val & TXE_IRQ) goto err_txe; if (val & TXC_IRQ) { /* debouncing, 2nd read */ val = cyrf6936_rreg(p, TX_IRQ_STATUS); if (val & TXE_IRQ) goto err_txe; /* tx ok*/ p->stats.tx_packets++; p->stats.tx_bytes += p->tx_skb->len; if (netif_msg_tx_done(p)) dev_dbg(&p->netdev->dev, "tx done\n"); dev_kfree_skb(p->tx_skb); p->tx_skb = NULL; netif_wake_queue(p->netdev); cyrf6936_rx_enable(p); } if (!p->pollmode) enable_irq(p->netdev->irq); return; err_rxe: if (netif_msg_rx_err(p)) dev_info(&p->netdev->dev, "rx error\n"); p->stats.rx_errors++; cyrf6936_rx_enable(p); return; err_txe: if (netif_msg_tx_err(p)) dev_info(&p->netdev->dev, "tx error\n"); p->stats.tx_errors++; cyrf6936_rx_enable(p); return; err_oom: dev_err(&p->netdev->dev, "out of memory, packet dropped\n"); p->stats.rx_dropped++; cyrf6936_rx_enable(p); }
void send_radiotap_monitor_packets( PNET_DEV pNetDev, PNDIS_PACKET pRxPacket, VOID *dot11_hdr, UCHAR *pData, USHORT DataSize, UCHAR L2PAD, UCHAR PHYMODE, UCHAR BW, UCHAR ShortGI, UCHAR MCS, UCHAR LDPC, UCHAR LDPC_EX_SYM, UCHAR AMPDU, UCHAR STBC, UCHAR RSSI1, UCHAR *pDevName, UCHAR Channel, UCHAR CentralChannel, UCHAR sideband_index, UINT32 MaxRssi) { struct sk_buff *pOSPkt; int rate_index = 0; USHORT header_len = 0; UCHAR temp_header[40] = {0}; struct mtk_radiotap_header *mtk_rt_hdr; UINT32 varlen = 0, padding_len = 0; UINT64 tmp64; UINT32 tmp32; UINT16 tmp16; UCHAR *pos; DOT_11_HDR *pHeader = (DOT_11_HDR *)dot11_hdr; MEM_DBG_PKT_FREE_INC(pRxPacket); pOSPkt = RTPKT_TO_OSPKT(pRxPacket); pOSPkt->dev = pNetDev; if (pHeader->FC.Type == 0x2 /* FC_TYPE_DATA */) { DataSize -= LENGTH_802_11; if ((pHeader->FC.ToDs == 1) && (pHeader->FC.FrDs == 1)) header_len = LENGTH_802_11_WITH_ADDR4; else header_len = LENGTH_802_11; /* QOS */ if (pHeader->FC.SubType & 0x08) { header_len += 2; /* Data skip QOS contorl field */ DataSize -= 2; } /* Order bit: A-Ralink or HTC+ */ if (pHeader->FC.Order) { header_len += 4; /* Data skip HTC contorl field */ DataSize -= 4; } /* Copy Header */ if (header_len <= 40) NdisMoveMemory(temp_header, pData, header_len); /* skip HW padding */ if (L2PAD) pData += (header_len + 2); else pData += header_len; } if (DataSize < pOSPkt->len) { skb_trim(pOSPkt, DataSize); } else { skb_put(pOSPkt, (DataSize - pOSPkt->len)); } if ((pData - pOSPkt->data) > 0) { skb_put(pOSPkt, (pData - pOSPkt->data)); skb_pull(pOSPkt, (pData - pOSPkt->data)); } if (skb_headroom(pOSPkt) < (sizeof(*mtk_rt_hdr) + header_len)) { if (pskb_expand_head(pOSPkt, (sizeof(*mtk_rt_hdr) + header_len), 0, GFP_ATOMIC)) { DBGPRINT(RT_DEBUG_ERROR, ("%s : Reallocate header size of sk_buff fail!\n", __FUNCTION__)); goto err_free_sk_buff; } } if (header_len > 0) NdisMoveMemory(skb_push(pOSPkt, header_len), temp_header, header_len); /* tsf */ padding_len = ((varlen % 8) == 0) ? 
0 : (8 - (varlen % 8)); varlen += (8 + padding_len); /* flags */ varlen += 1; /* rate */ if (PHYMODE < MODE_HTMIX) varlen += 1; /* channel frequency */ padding_len = ((varlen % 2) == 0) ? 0 : (2 - (varlen % 2)); varlen += (2 + padding_len); /* channel flags */ varlen += 2; /* MCS */ if ((PHYMODE == MODE_HTMIX) || (PHYMODE == MODE_HTGREENFIELD)) { /* known */ varlen += 1; /* flags */ varlen += 1; /* index */ varlen += 1; } /* A-MPDU */ if (AMPDU) { /* reference number */ padding_len = ((varlen % 4) == 0) ? 0 : (4 - (varlen % 4)); varlen += (4 + padding_len); /* flags */ varlen += 2; /* delimiter crc value */ varlen += 1; /* reserved */ varlen += 1; } /* VHT */ if (PHYMODE == MODE_VHT) { /* known */ padding_len = ((varlen % 2) == 0) ? 0 : (2 - (varlen % 2)); varlen += (2 + padding_len); /* flags */ varlen += 1; /* bandwidth */ varlen += 1; /* mcs_nss */ varlen += 4; /* coding */ varlen += 1; /* group_id */ varlen += 1; /* partial_aid */ varlen += 2; } mtk_rt_hdr = (struct mtk_radiotap_header *)skb_push(pOSPkt, sizeof(*mtk_rt_hdr) + varlen); NdisZeroMemory(mtk_rt_hdr, sizeof(*mtk_rt_hdr) + varlen); mtk_rt_hdr->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; mtk_rt_hdr->rt_hdr.it_pad = 0; mtk_rt_hdr->rt_hdr.it_len = cpu2le16(sizeof(*mtk_rt_hdr) + varlen); mtk_rt_hdr->rt_hdr.it_present = cpu2le32( (1 << IEEE80211_RADIOTAP_TSFT) | (1 << IEEE80211_RADIOTAP_FLAGS)); if (PHYMODE < MODE_HTMIX) { mtk_rt_hdr->rt_hdr.it_present |= cpu2le32(1 << IEEE80211_RADIOTAP_RATE); } mtk_rt_hdr->rt_hdr.it_present |= cpu2le32(1 << IEEE80211_RADIOTAP_CHANNEL); if ((PHYMODE == MODE_HTMIX) || (PHYMODE == MODE_HTGREENFIELD)) { mtk_rt_hdr->rt_hdr.it_present |= cpu2le32(1 << IEEE80211_RADIOTAP_MCS); } if (AMPDU) { mtk_rt_hdr->rt_hdr.it_present |= cpu2le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); } if (PHYMODE == MODE_VHT) mtk_rt_hdr->rt_hdr.it_present |= cpu2le32(1 << IEEE80211_RADIOTAP_VHT); varlen = 0; pos = mtk_rt_hdr->variable; padding_len = ((varlen % 8) == 0) ? 
0 : (8 - (varlen % 8)); pos += padding_len; varlen += padding_len; /* tsf */ tmp64 = 0; NdisMoveMemory(pos, &tmp64, 8); pos += 8; varlen += 8; /* flags */ *pos = 0; pos++; varlen++; /* rate */ if (PHYMODE == MODE_OFDM) { rate_index = (UCHAR)(MCS) + 4; *pos = ralinkrate[rate_index]; pos++; varlen++; } else if (PHYMODE == MODE_CCK) { rate_index = (UCHAR)(MCS); *pos = ralinkrate[rate_index]; pos++; varlen++; } /* channel frequency */ padding_len = ((varlen % 2) == 0) ? 0 : (2 - (varlen % 2)); pos += padding_len; varlen += padding_len; #define ieee80211chan2mhz(x) \ (((x) <= 14) ? \ (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ ((x) + 1000) * 5) tmp16 = cpu2le16(ieee80211chan2mhz(Channel)); NdisMoveMemory(pos, &tmp16, 2); pos += 2; varlen += 2; if (Channel > 14) { tmp16 = cpu2le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); } else { if (PHYMODE == MODE_CCK) { tmp16 = cpu2le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ); } else { tmp16 = cpu2le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); } } NdisMoveMemory(pos, &tmp16, 2); pos += 2; varlen += 2; /* HT MCS */ if ((PHYMODE == MODE_HTMIX) || (PHYMODE == MODE_HTGREENFIELD)) { *pos = (IEEE80211_RADIOTAP_MCS_HAVE_BW | IEEE80211_RADIOTAP_MCS_HAVE_MCS | IEEE80211_RADIOTAP_MCS_HAVE_GI | IEEE80211_RADIOTAP_MCS_HAVE_FMT | IEEE80211_RADIOTAP_MCS_HAVE_FEC); pos++; varlen++; /* BW */ if (BW == 0) { *pos = HT_BW(IEEE80211_RADIOTAP_MCS_BW_20); } else { *pos = HT_BW(IEEE80211_RADIOTAP_MCS_BW_40); } /* HT GI */ *pos |= HT_GI(ShortGI); /* HT format */ if (PHYMODE == MODE_HTMIX) *pos |= HT_FORMAT(0); else if (PHYMODE == MODE_HTGREENFIELD) *pos |= HT_FORMAT(1); /* HT FEC type */ *pos |= HT_FEC_TYPE(LDPC); pos++; varlen++; /* HT mcs index */ *pos = MCS; pos++; varlen++; } if (AMPDU) { /* reference number */ padding_len = ((varlen % 4) == 0) ? 
0 : (4 - (varlen % 4)); varlen += padding_len; pos += padding_len; tmp32 = 0; NdisMoveMemory(pos, &tmp32, 4); pos += 4; varlen += 2; /* flags */ tmp16 = 0; NdisMoveMemory(pos, &tmp16, 2); pos += 2; varlen += 2; /* delimiter CRC value */ *pos = 0; pos++; varlen++; /* reserved */ *pos = 0; pos++; varlen++; } #ifdef DOT11_VHT_AC /* VHT */ if (PHYMODE == MODE_VHT) { /* known */ padding_len = ((varlen % 2) == 0) ? 0 : (2 - (varlen % 2)); varlen += padding_len; pos += padding_len; tmp16 = cpu2le16(IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_GI | IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM | IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH); NdisMoveMemory(pos, &tmp16, 2); pos += 2; varlen += 2; /* flags */ *pos = (STBC?IEEE80211_RADIOTAP_VHT_FLAG_STBC:0); *pos |= (ShortGI?IEEE80211_RADIOTAP_VHT_FLAG_SGI:0); *pos |= (LDPC_EX_SYM?IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM:0); pos++; varlen++; /* bandwidth */ if (BW == 0) { *pos = 0; } else if (BW == 1) { *pos = 1; } else if (BW == 2) { *pos = 4; #if 0 if (sideband_index == 0) *pos = 7; /* 20LL */ else if (sideband_index == 1) *pos = 8; /* 20LU */ else if (sideband_index == 2) *pos = 9; /* 20UL */ else if (sideband_index == 3) *pos = 10; /* 20UU */ #endif } else { DBGPRINT(RT_DEBUG_ERROR, ("%s:unknow bw(%d)\n", __FUNCTION__, BW)); } /* mcs_nss */ pos++; varlen++; /* vht_mcs_nss[0] */ *pos = (GET_VHT_NSS(MCS) & 0x0f); *pos |= ((GET_VHT_MCS(MCS) & 0x0f) << 4); pos++; varlen++; /* vht_mcs_nss[1] */ *pos = 0; pos++; varlen++; /* vht_mcs_nss[2] */ *pos = 0; pos++; varlen++; /* vht_mcs_nss[3] */ *pos = 0; pos++; varlen++; /* coding */ if (LDPC) *pos = 1; else *pos = 0; pos++; varlen++; /* group_id */ *pos = 0; pos++; varlen++; /* partial aid */ tmp16 = 0; NdisMoveMemory(pos, &tmp16, 2); pos += 2; varlen += 2; } #endif /* DOT11_VHT_AC */ pOSPkt->dev = pOSPkt->dev; skb_reset_mac_header(pOSPkt); pOSPkt->pkt_type = PACKET_OTHERHOST; pOSPkt->protocol = __constant_htons(ETH_P_80211_RAW); pOSPkt->ip_summed = 
CHECKSUM_NONE; netif_rx_ni(pOSPkt); return; err_free_sk_buff: RELEASE_NDIS_PACKET(NULL, pRxPacket, NDIS_STATUS_FAILURE); return; }
/*
 * Parse the payload area of a received CAIF-over-HSI descriptor and hand
 * every embedded CAIF frame to the network stack.
 *
 * Returns the number of payload bytes consumed (>= 0) on success, or a
 * negative errno (-EPROTO on a malformed descriptor/frame, -ENOMEM if an
 * skb allocation fails).  On -ENOMEM the number of frames already
 * delivered is recorded in cfhsi->rx_state.nfrms so a retry of the same
 * descriptor resumes after them instead of duplicating packets.
 */
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/*
	 * Skip already processed frames (frames delivered before a previous
	 * -ENOMEM aborted this descriptor; see rx_state.nfrms below).
	 */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload: one iteration per CAIF frame, bounded by the
	 * per-descriptor frame-length table (zero length terminates). */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0;

		/* CAIF frame starts after head padding; the first byte of
		 * the slot is the padding length itself. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			/* Remember progress so a retry resumes here. */
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		cfhsi_notify_rx(cfhsi);

		/* Update network statistics (use the cached len — the skb
		 * belongs to the stack after netif_rx*). */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		/* Advance by the table-declared slot length, not the CAIF
		 * frame length: slots may include padding. */
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}
/****************************************************************************************
 *                        os_receivePacket()
 ****************************************************************************************
DESCRIPTION:    Hand one received Ethernet packet to the Linux network stack.
                Recovers the sk_buff that was attached ahead of the packet
                buffer (via the rx_head_t stored before the WSPI padding),
                points skb->data at the Ethernet payload and forwards the
                skb with netif_rx_ni().

ARGUMENTS:      OsContext - driver object handle (TWlanDrvIfObj *)
                pRxDesc   - Rx descriptor handle (not used here)
                pPacket   - pointer to the packet payload; may carry a
                            2-byte alignment shift in its low bits
                Length    - payload length in bytes (debug-checked against
                            RX_ETH_PKT_LEN)

RETURN:         TI_TRUE always (ownership of the skb passes to the kernel,
                which is responsible for freeing it)

NOTES:
*****************************************************************************************/
TI_BOOL os_receivePacket(TI_HANDLE OsContext, void *pRxDesc ,void *pPacket, TI_UINT16 Length)
{
   TWlanDrvIfObj  *drv = (TWlanDrvIfObj *)OsContext;
   unsigned char  *pdata = (unsigned char *)((TI_UINT32)pPacket & ~(TI_UINT32)0x3);
   rx_head_t      *rx_head = (rx_head_t *)(pdata -  WSPI_PAD_BYTES - RX_HEAD_LEN_ALIGNED);
   struct sk_buff *skb = rx_head->skb;

#ifdef TI_DBG
   if ((TI_UINT32)pPacket & 0x3)
   {
     if ((TI_UINT32)pPacket - (TI_UINT32)skb->data != 2)
     {
        printk("os_receivePacket() address error skb=0x%x skb->data=0x%x pPacket=0x%x !!!\n",(int)skb, (int)skb->data, (int)pPacket);
     }
   }
   else
   {
     if ((TI_UINT32)skb->data != (TI_UINT32)pPacket)
     {
        printk("os_receivePacket() address error skb=0x%x skb->data=0x%x pPacket=0x%x !!!\n",(int)skb, (int)skb->data, (int)pPacket);
     }
   }
   if (Length != RX_ETH_PKT_LEN(pPacket))
   {
       printk("os_receivePacket() Length=%d != RX_ETH_PKT_LEN(pPacket)=%d!!!\n",(int)Length, RX_ETH_PKT_LEN(pPacket));
   }
#endif
/*
   printk("-->> os_receivePacket() pPacket=0x%x Length=%d skb=0x%x skb->data=0x%x skb->head=0x%x skb->len=%d\n",
          (int)pPacket, (int)Length, (int)skb, (int)skb->data, (int)skb->head, (int)skb->len);
*/
   /* Advance skb->data to the start of the Ethernet payload.
    * Use skb_reserve() instead of assigning skb->data/skb->tail directly:
    * it updates both fields together and stays correct on kernels where
    * skb->tail is an offset (NET_SKBUFF_DATA_USES_OFFSET), whereas the
    * raw assignment does not.  The skb is empty here (data == tail), so
    * reserving the delta lands both pointers on the payload start. */
   skb_reserve(skb, (unsigned char *)RX_ETH_PKT_DATA(pPacket) - skb->data);
   skb_put(skb, RX_ETH_PKT_LEN(pPacket));
/*
   printk("-->> os_receivePacket() skb=0x%x skb->data=0x%x skb->head=0x%x skb->len=%d\n",
          (int)skb, (int)skb->data, (int)skb->head, (int)skb->len);
*/
   ti_nodprintf(TIWLAN_LOG_INFO, "os_receivePacket - Received EAPOL len-%d\n", WBUF_LEN(pWbuf));

   skb->dev       = drv->netdev;
   skb->protocol  = eth_type_trans(skb, drv->netdev);
   skb->ip_summed = CHECKSUM_NONE;

   drv->stats.rx_packets++;
   drv->stats.rx_bytes += skb->len;

   /* Send the skb to the TCP stack.
    * it responsibly of the Linux kernel to free the skb
    */
   {
       CL_TRACE_START_L1();

       /* Prevent system suspend one more second after WLAN task completion (in case of more Rx packets) */
       os_WakeLockTimeoutEnable(drv);

       netif_rx_ni(skb);

       /* Note: Don't change this trace (needed to exclude OS processing from Rx CPU utilization) */
       CL_TRACE_END_L1("tiwlan_drv.ko", "OS", "RX", "");
   }

   return TI_TRUE;
}
/*
 * Drain the QCA7000's SPI read buffer and push every completely decoded
 * Ethernet frame to the network stack.
 *
 * Returns 0 on success, -1 on any failure (no rx skb available, nothing
 * to read, or a burst read returning zero bytes).  The partially-filled
 * qca->rx_skb and the frame-decoder state in qca->frm_handle persist
 * across calls, so a frame may span several invocations.
 */
static int qcaspi_receive(struct qcaspi *qca)
{
	struct net_device *net_dev = qca->net_dev;
	struct net_device_stats *n_stats = &net_dev->stats;
	u16 available = 0;
	u32 bytes_read;
	u8 *cp;

	/* Allocate rx SKB if we don't have one available. */
	if (!qca->rx_skb) {
		qca->rx_skb = netdev_alloc_skb(net_dev,
					       net_dev->mtu + VLAN_ETH_HLEN);
		if (!qca->rx_skb) {
			netdev_dbg(net_dev, "out of RX resources\n");
			qca->stats.out_of_mem++;
			return -1;
		}
	}

	/* Read the packet size. */
	qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
	netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
		   available);

	if (available == 0) {
		netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
		return -1;
	}

	/* Tell the chip how much we intend to read before starting. */
	qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available);

	/* Legacy mode needs an explicit external-read command first. */
	if (qca->legacy_mode)
		qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);

	/* Pull the data in bursts of at most qca->burst_len bytes. */
	while (available) {
		u32 count = available;

		if (count > qca->burst_len)
			count = qca->burst_len;

		if (qca->legacy_mode) {
			bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer,
							count);
		} else {
			bytes_read = qcaspi_read_burst(qca, qca->rx_buffer,
						       count);
		}

		netdev_dbg(net_dev, "available: %d, byte read: %d\n",
			   available, bytes_read);

		if (bytes_read) {
			available -= bytes_read;
		} else {
			/* Zero-byte read: give up on this buffer. */
			qca->stats.read_err++;
			return -1;
		}

		cp = qca->rx_buffer;

		/* Feed the raw bytes one at a time into the framing FSM;
		 * stop early if we lose the rx skb (alloc failure below). */
		while ((bytes_read--) && (qca->rx_skb)) {
			s32 retcode;

			retcode = qcafrm_fsm_decode(&qca->frm_handle,
						    qca->rx_skb->data,
						    skb_tailroom(qca->rx_skb),
						    *cp);
			cp++;
			switch (retcode) {
			case QCAFRM_GATHER:
			case QCAFRM_NOHEAD:
				/* Frame still being assembled. */
				break;
			case QCAFRM_NOTAIL:
				netdev_dbg(net_dev, "no RX tail\n");
				n_stats->rx_errors++;
				n_stats->rx_dropped++;
				break;
			case QCAFRM_INVLEN:
				netdev_dbg(net_dev, "invalid RX length\n");
				n_stats->rx_errors++;
				n_stats->rx_dropped++;
				break;
			default:
				/* Positive retcode = completed frame length
				 * (payload already written into rx_skb->data
				 * by the decoder). */
				qca->rx_skb->dev = qca->net_dev;
				n_stats->rx_packets++;
				n_stats->rx_bytes += retcode;
				skb_put(qca->rx_skb, retcode);
				qca->rx_skb->protocol = eth_type_trans(
					qca->rx_skb, qca->rx_skb->dev);
				qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_rx_ni(qca->rx_skb);
				/* Replacement skb for the next frame; on
				 * failure the inner loop condition above
				 * terminates byte processing. */
				qca->rx_skb = netdev_alloc_skb(net_dev,
					net_dev->mtu + VLAN_ETH_HLEN);
				if (!qca->rx_skb) {
					netdev_dbg(net_dev, "out of RX resources\n");
					n_stats->rx_errors++;
					qca->stats.out_of_mem++;
					break;
				}
			}
		}
	}

	return 0;
}
/* Send one completely decapsulated can_frame to the network layer */ static void slc_bump(struct slcan *sl) { struct sk_buff *skb; struct can_frame cf; int i, dlc_pos, tmp; unsigned long ultmp; char cmd = sl->rbuff[0]; if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) return; if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ dlc_pos = 4; /* dlc position tiiid */ else dlc_pos = 9; /* dlc position Tiiiiiiiid */ if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9'))) return; cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ if (strict_strtoul(sl->rbuff+1, 16, &ultmp)) return; cf.can_id = ultmp; if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */ cf.can_id |= CAN_EFF_FLAG; if ((cmd | 0x20) == 'r') /* RTR frame */ cf.can_id |= CAN_RTR_FLAG; *(u64 *) (&cf.data) = 0; /* clear payload */ for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { tmp = asc2nibble(sl->rbuff[dlc_pos++]); if (tmp > 0x0F) return; cf.data[i] = (tmp << 4); tmp = asc2nibble(sl->rbuff[dlc_pos++]); if (tmp > 0x0F) return; cf.data[i] |= tmp; } skb = dev_alloc_skb(sizeof(struct can_frame)); if (!skb) return; skb->dev = sl->dev; skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; memcpy(skb_put(skb, sizeof(struct can_frame)), &cf, sizeof(struct can_frame)); netif_rx_ni(skb); sl->dev->stats.rx_packets++; sl->dev->stats.rx_bytes += cf.can_dlc; }
/*
 * Deliver a received data frame to the network stack.
 * The @dev argument is currently unused; the return value is the
 * verdict from netif_rx_ni() (NET_RX_SUCCESS or NET_RX_DROP).
 */
static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
{
	int rc;

	rc = netif_rx_ni(skb);
	return rc;
}
/*
 * Deliver a received management frame to userspace via the monitor
 * interface: copy the raw frame into a fresh skb, prepend a radiotap
 * header, and inject it with netif_rx_ni().
 *
 * pMonAdapter  - monitor-mode adapter whose netdev receives the frame
 * nFrameLength - length of the raw 802.11 frame in bytes
 * pbFrames     - pointer to the raw frame bytes (must not be NULL)
 * frameType    - frame type tag; not used in this function body
 */
void hdd_sendMgmtFrameOverMonitorIface( hdd_adapter_t *pMonAdapter,
                                        tANI_U32 nFrameLength,
                                        tANI_U8* pbFrames,
                                        tANI_U8 frameType )
{
   //Indicate a Frame over Monitor Intf.
   int rxstat;
   struct sk_buff *skb = NULL;
   int needed_headroom = 0;
   /* Rx status flags encoded into the radiotap header by the helper. */
   int flag = HDD_RX_FLAG_IV_STRIPPED | HDD_RX_FLAG_DECRYPTED |
              HDD_RX_FLAG_MMIC_STRIPPED;
#ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
   hdd_context_t* pHddCtx = (hdd_context_t*)(pMonAdapter->pHddCtx);
#endif
   hddLog( LOG1, FL("Indicate Frame over Monitor Intf"));

   VOS_ASSERT( (pbFrames != NULL) );

   /* room for the radiotap header based on driver features
    * 1 Byte for RADIO TAP Flag, 1 Byte padding and 2 Byte for
    * RX flags.
    * */
   needed_headroom = sizeof(struct ieee80211_radiotap_header) + 4;

   //alloc skb  here
   skb = alloc_skb(VPKT_SIZE_BUFFER, GFP_ATOMIC);
   if (unlikely(NULL == skb))
   {
       hddLog( LOGW, FL("Unable to allocate skb"));
       return;
   }
   /* Reserve the whole buffer so the frame and radiotap header can be
    * skb_push()ed in front of skb->data below. */
   skb_reserve(skb, VPKT_SIZE_BUFFER);
   if (unlikely(skb_headroom(skb) < nFrameLength))
   {
       VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                 "HDD [%d]: Insufficient headroom, "
                 "head[%p], data[%p], req[%d]",
                 __LINE__, skb->head, skb->data, nFrameLength);
       kfree_skb(skb);
       return ;
   }
   // actually push the data
   memcpy(skb_push(skb, nFrameLength), pbFrames, nFrameLength);

   /* prepend radiotap information (helper returns non-zero on failure,
    * e.g. insufficient remaining headroom). */
   if( 0 != hdd_wlan_add_rx_radiotap_hdr( skb, needed_headroom, flag ) )
   {
       hddLog( LOGE, FL("Not Able Add Radio Tap"));
       //free skb
       kfree_skb(skb);
       return ;
   }

   skb_reset_mac_header( skb );
   skb->dev = pMonAdapter->dev;
   skb->protocol = eth_type_trans( skb, skb->dev );
   skb->ip_summed = CHECKSUM_NONE;
#ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
   /* Keep the host awake long enough for userspace to drain the frame. */
   wake_lock_timeout(&pHddCtx->rx_wake_lock, HDD_WAKE_LOCK_DURATION);
#endif
   rxstat = netif_rx_ni(skb);
   if( NET_RX_SUCCESS == rxstat )
   {
       hddLog( LOG1, FL("Success"));
   }
   else
       hddLog( LOGE, FL("Failed %d"), rxstat);

   return ;
}
/*
 * TX-completion handler for a management frame sent from a monitor
 * interface: rebuild the skb so it again starts at the MAC header
 * (restored from cfgState->buf), prepend a minimal radiotap header
 * carrying TX-flags and retry count, and loop the frame back up the
 * stack so sniffers see the transmission result.
 *
 * actionSendSuccess is written into the low byte of the radiotap
 * TX-flags field (the remaining bytes stay zero from the memset).
 */
static void hdd_wlan_tx_complete( hdd_adapter_t* pAdapter,
                                  hdd_cfg80211_state_t* cfgState,
                                  tANI_BOOLEAN actionSendSuccess )
{
    struct ieee80211_radiotap_header *rthdr;
    unsigned char *pos;
    struct sk_buff *skb = cfgState->skb;
#ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
    hdd_context_t *pHddCtx = (hdd_context_t*)(pAdapter->pHddCtx);
#endif

    /* 2 Byte for TX flags and 1 Byte for Retry count */
    u32 rtHdrLen = sizeof(*rthdr) + 3;

    u8 *data;

    /* We have to return skb with Data starting with MAC header. We have
     * copied SKB data starting with MAC header to cfgState->buf. We will pull
     * entire skb->len from skb and then we will push cfgState->buf to skb
     * */
    if( NULL == skb_pull(skb, skb->len) )
    {
        hddLog( LOGE, FL("Not Able to Pull %d byte from skb"), skb->len);
        kfree_skb(cfgState->skb);
        return;
    }

    data = skb_push( skb, cfgState->len );

    if (data == NULL)
    {
        hddLog( LOGE, FL("Not Able to Push %d byte to skb"), cfgState->len);
        kfree_skb( cfgState->skb );
        return;
    }

    memcpy( data, cfgState->buf, cfgState->len );

    /* send frame to monitor interfaces now */
    if( skb_headroom(skb) < rtHdrLen )
    {
        hddLog( LOGE, FL("No headroom for rtap header"));
        kfree_skb(cfgState->skb);
        return;
    }

    rthdr = (struct ieee80211_radiotap_header*) skb_push( skb, rtHdrLen );

    /* Zero the whole header first so unwritten bytes (TX-flags high
     * byte, padding) are well-defined. */
    memset( rthdr, 0, rtHdrLen );
    rthdr->it_len = cpu_to_le16( rtHdrLen );
    rthdr->it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
                                    (1 << IEEE80211_RADIOTAP_DATA_RETRIES) );

    pos = (unsigned char *)( rthdr+1 );

    // Fill TX flags
    *pos = actionSendSuccess;
    pos += 2;

    // Fill retry count
    *pos = 0;
    pos++;

    skb_set_mac_header( skb, 0 );
    skb->ip_summed = CHECKSUM_NONE;
    skb->pkt_type  = PACKET_OTHERHOST;
    skb->protocol  = htons(ETH_P_802_2);
    memset( skb->cb, 0, sizeof( skb->cb ) );
#ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
    wake_lock_timeout(&pHddCtx->rx_wake_lock, HDD_WAKE_LOCK_DURATION);
#endif
    /* Context is unknown here (completion may arrive in IRQ context). */
    if (in_interrupt())
        netif_rx( skb );
    else
        netif_rx_ni( skb );

    /* Enable Queues which we have disabled earlier */
    netif_tx_start_all_queues( pAdapter->dev );
}
/**
 *  @brief This function processes received packet and forwards it
 *  to kernel/upper layer
 *
 *  Non-monitor path: strips the firmware rxpd header, converts the
 *  802.3 + RFC1042 (LLC/SNAP) framing into an Ethernet II frame when the
 *  SNAP header matches, updates stats/RSSI, and hands the skb to the
 *  stack.  Monitor mode is delegated to process_rxed_802_11_packet().
 *
 *  @param priv    A pointer to struct lbs_private
 *  @param skb     A pointer to skb which includes the received packet;
 *                 ownership passes to this function (freed on error,
 *                 or consumed by netif_rx on success)
 *  @return 	   0 or -1
 */
int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
{
	int ret = 0;
	struct net_device *dev = priv->dev;
	struct rxpackethdr *p_rx_pkt;
	struct rxpd *p_rx_pd;
	int hdrchop;
	struct ethhdr *p_ethhdr;
	/* RFC1042 LLC/SNAP prefix (AA AA 03 00 00 00) that firmware puts
	 * in front of the original ethertype. */
	const u8 rfc1042_eth_hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

	lbs_deb_enter(LBS_DEB_RX);

	BUG_ON(!skb);

	skb->ip_summed = CHECKSUM_NONE;

	if (priv->monitormode)
		return process_rxed_802_11_packet(priv, skb);

	p_rx_pkt = (struct rxpackethdr *) skb->data;
	p_rx_pd = &p_rx_pkt->rx_pd;
	/* Mesh-tagged frames are accounted to the mesh netdev instead. */
	if (priv->mesh_dev && (p_rx_pd->rx_control & RxPD_MESH_FRAME))
		dev = priv->mesh_dev;

	lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data,
		 min_t(unsigned int, skb->len, 100));

	if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
		lbs_deb_rx("rx err: frame received with bad length\n");
		dev->stats.rx_length_errors++;
		ret = 0;
		dev_kfree_skb(skb);
		goto done;
	}

	/*
	 * Check rxpd status and update 802.3 stat,
	 */
	if (!(p_rx_pd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) {
		lbs_deb_rx("rx err: frame received with bad status\n");
		lbs_pr_alert("rxpd not ok\n");
		dev->stats.rx_errors++;
		ret = 0;
		dev_kfree_skb(skb);
		goto done;
	}

	lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
		skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));

	lbs_deb_hex(LBS_DEB_RX, "RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr,
		sizeof(p_rx_pkt->eth803_hdr.dest_addr));
	lbs_deb_hex(LBS_DEB_RX, "RX Data: Src", p_rx_pkt->eth803_hdr.src_addr,
		sizeof(p_rx_pkt->eth803_hdr.src_addr));

	if (memcmp(&p_rx_pkt->rfc1042_hdr,
		   rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) {
		/*
		 *  Replace the 803 header and rfc1042 header (llc/snap) with an
		 *    EthernetII header, keep the src/dst and snap_type (ethertype)
		 *
		 *  The firmware only passes up SNAP frames converting
		 *    all RX Data from 802.11 to 802.2/LLC/SNAP frames.
		 *
		 *  To create the Ethernet II, just move the src, dst address right
		 *    before the snap_type.
		 */
		/* Compute where the EthII header must start so that its
		 * ethertype field lands exactly on the existing snap_type. */
		p_ethhdr = (struct ethhdr *)
		    ((u8 *) & p_rx_pkt->eth803_hdr
		     + sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr)
		     - sizeof(p_rx_pkt->eth803_hdr.dest_addr)
		     - sizeof(p_rx_pkt->eth803_hdr.src_addr)
		     - sizeof(p_rx_pkt->rfc1042_hdr.snap_type));

		/* In-place copy: src is written before dest because the
		 * regions overlap in this order. */
		memcpy(p_ethhdr->h_source, p_rx_pkt->eth803_hdr.src_addr,
		       sizeof(p_ethhdr->h_source));
		memcpy(p_ethhdr->h_dest, p_rx_pkt->eth803_hdr.dest_addr,
		       sizeof(p_ethhdr->h_dest));

		/* Chop off the rxpd + the excess memory from the 802.2/llc/snap header
		 *   that was removed
		 */
		hdrchop = (u8 *) p_ethhdr - (u8 *) p_rx_pkt;
	} else {
		lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
			(u8 *) & p_rx_pkt->rfc1042_hdr,
			sizeof(p_rx_pkt->rfc1042_hdr));

		/* Chop off the rxpd */
		hdrchop = (u8 *) & p_rx_pkt->eth803_hdr - (u8 *) p_rx_pkt;
	}

	/* Chop off the leading header bytes so the skb points to the start of
	 *   either the reconstructed EthII frame or the 802.2/llc/snap frame
	 */
	skb_pull(skb, hdrchop);

	/* Take the data rate from the rxpd structure
	 * only if the rate is auto
	 */
	if (priv->enablehwauto)
		priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);

	lbs_compute_rssi(priv, p_rx_pd);

	lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->protocol = eth_type_trans(skb, dev);
	/* Context unknown: this path can run from IRQ or task context. */
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);

	ret = 0;
done:
	lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret);
	return ret;
}
static void adapter_rx_packet(struct net_adapter *adapter, struct buffer_descriptor *bufdsc) { struct hw_packet_header *hdr; s32 rlen = bufdsc->length; u32 l; u8 *ofs; struct sk_buff *rx_skb; ofs = (u8 *)bufdsc->buffer; while (rlen > 0) { hdr = (struct hw_packet_header *)ofs; /* "WD", "WC", "WP" or "WE" */ if (unlikely(hdr->id0 != 'W')) { /*Ignore if it is the 4 byte allignment*/ pr_warn("Wrong packet \ ID (%02x %02x) rlen = %d\n", hdr->id0, hdr->id1, rlen); /* skip rest of packets */ break; } /* change offset */ ofs += sizeof(*hdr); rlen -= sizeof(*hdr); /* check packet type */ switch (hdr->id1) { case 'P': { /* revert offset */ ofs -= sizeof(*hdr); rlen += sizeof(*hdr); /* process packet */ l = process_private_cmd(adapter, ofs); /* shift */ ofs += l; rlen -= l; /* process next packet */ continue; } case 'C': if (!adapter->downloading) { ofs += 2; rlen -= 2; control_recv(adapter, (u8 *)ofs, hdr->length); break; } else { hdr->length -= sizeof(*hdr); process_indicate_packet(adapter, ofs); break; } case 'D': ofs += 2; rlen -= 2; if (hdr->length > BUFFER_DATA_SIZE) { pr_warn("Data packet too large"); adapter->netstats.rx_dropped++; break; } if (likely(hdr->length <= (WIMAX_MTU_SIZE + 2))) { rx_skb = cmc7xx_fetch_skb(adapter); if (!rx_skb) { pr_err("unable to allocate skb"); break; } } else { rx_skb = dev_alloc_skb(hdr->length + (ETHERNET_ADDRESS_LENGTH * 2) + NET_IP_ALIGN); if (!rx_skb) { pr_err("unable to allocate skb"); break; } cmc7xx_prepare_skb(adapter, rx_skb); } memcpy(skb_put(rx_skb, hdr->length), (u8 *)ofs, hdr->length); rx_skb->protocol = eth_type_trans(rx_skb, adapter->net); if (netif_rx_ni(rx_skb) == NET_RX_DROP) { pr_debug("packet dropped!"); adapter->netstats.rx_dropped++; } adapter->netstats.rx_packets++; adapter->netstats.rx_bytes += (hdr->length + (ETHERNET_ADDRESS_LENGTH * 2)); break; case 'E': pr_warn("%s :Wrong packet Extended ID [%02x %02x]", __func__, hdr->id0, hdr->id1); /* skip rest of buffer */ goto out; default: pr_warn("%s :Wrong packet ID 
[%02x %02x]", __func__, hdr->id0, hdr->id1); /* skip rest of buffer */ goto out; } ofs += hdr->length; rlen -= hdr->length; }