static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct sk_buff *new_skb;
	eg_cache_entry *eg;
	struct mpoa_client *mpc;
	uint32_t tag;
	char *tmp;

	ddprintk("mpoa: (%s) mpc_push:\n", dev->name);
	if (skb == NULL) {
		dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name);
		mpc_vcc_close(vcc, dev);
		return;
	}

	skb->dev = dev;
	if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
		   sizeof(struct llc_snap_hdr)) == 0) {
		struct sock *sk = sk_atm(vcc);

		dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name);
		/* Pass control packets to daemon */
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return;
	}

	/* data coming over the shortcut */
	atm_return(vcc, skb->truesize);

	mpc = find_mpc_by_lec(dev);
	if (mpc == NULL) {
		printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name);
		return;
	}

	if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
		   sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
		ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name);
	} else if (memcmp(skb->data, &llc_snap_mpoa_data,
			  sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
		printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name);
		printk("           mpc_push: non-tagged data unsupported, purging\n");
		dev_kfree_skb_any(skb);
		return;
	} else {
		printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name);
		dev_kfree_skb_any(skb);
		return;
	}

	tmp = skb->data + sizeof(struct llc_snap_hdr);
	tag = *(uint32_t *)tmp;

	eg = mpc->eg_ops->get_by_tag(tag, mpc);
	if (eg == NULL) {
		printk("mpoa: (%s) mpc_push: didn't find egress cache entry, tag = %u\n",
		       dev->name, tag);
		purge_egress_shortcut(vcc, NULL);
		dev_kfree_skb_any(skb);
		return;
	}

	/*
	 * See if ingress MPC is using shortcut we opened as a return channel.
	 * This means we have a bi-directional vcc opened by us.
	 */
	if (eg->shortcut == NULL) {
		eg->shortcut = vcc;
		printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name);
	}

	skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */
	new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */
	dev_kfree_skb_any(skb);
	if (new_skb == NULL) {
		mpc->eg_ops->put(eg);
		return;
	}
	skb_push(new_skb, eg->ctrl_info.DH_length);	/* add MAC header */
	memcpy(new_skb->data, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length);
	new_skb->protocol = eth_type_trans(new_skb, dev);
	new_skb->nh.raw = new_skb->data;

	eg->latest_ip_addr = new_skb->nh.iph->saddr;
	eg->packets_rcvd++;
	mpc->eg_ops->put(eg);

	/* note: the original cleared ATM_SKB(skb) here, but skb was already
	 * freed above; new_skb is the buffer handed to the stack */
	memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
	netif_rx(new_skb);

	return;
}
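/*
 * A minimal, self-contained sketch of the demux pattern above: match an
 * LLC/SNAP prefix with memcmp(), then read the 32-bit MPOA tag that follows
 * it. The header bytes below are illustrative stand-ins, not the driver's
 * real llc_snap_mpoa_* constants, and memcpy() is used for the tag read to
 * stay safe on alignment-strict architectures (the driver casts directly).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LLC_SNAP_LEN 8

static const uint8_t llc_snap_tagged[LLC_SNAP_LEN] = {
	0xaa, 0xaa, 0x03, 0x00, 0x00, 0x5e, 0x88, 0x4c	/* example bytes only */
};

int main(void)
{
	uint8_t frame[LLC_SNAP_LEN + 4] = {
		0xaa, 0xaa, 0x03, 0x00, 0x00, 0x5e, 0x88, 0x4c,
		0x00, 0x00, 0x00, 0x2a		/* tag follows the header */
	};
	uint32_t tag;

	if (memcmp(frame, llc_snap_tagged, LLC_SNAP_LEN) == 0) {
		memcpy(&tag, frame + LLC_SNAP_LEN, sizeof(tag));
		printf("tagged frame, tag = 0x%08x\n", tag);
	}
	return 0;
}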
static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x = 0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do {
		p = lp->rx_ring[rx_ring_tail].p;

		if (!(p->status & (1 << 7))) {	/* Not COMPLETED */
			break;
		}
		if (p->status & (1 << 6)) {	/* COMPLETED_OK */
			u16 length = p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */
			if ((length > RX_COPYBREAK) &&
			    ((newskb = dev_alloc_skb(1532)) != NULL)) {
				skb = lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb, 18);
				lp->rx_ring[rx_ring_tail].skb = newskb;
				p->data = isa_virt_to_bus(newskb->data);
			} else {
				skb = dev_alloc_skb(length + 2);
				if (skb == NULL) {
					lp->net_stats.rx_dropped++;
					goto dropped;
				}
				skb_reserve(skb, 2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol = eth_type_trans(skb, dev);
			dev->last_rx = jiffies;
			lp->net_stats.rx_packets++;
			lp->net_stats.rx_bytes += length;
			netif_rx(skb);
		}
dropped:
		p->length = 1532;
		p->status = 0;

		rx_ring_tail = next_rx(rx_ring_tail);
	} while (x++ < 48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */
	if (rx_ring_tail != rx_old_tail) {
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;
		lp->rx_ring_tail = rx_ring_tail;
	}
}
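/*
 * Sketch of the ring-index arithmetic the loop above relies on: next_rx()/
 * prev_rx() wrap modulo the ring length, and the EOL (end-of-list) bit is
 * moved from the old tail's predecessor to the new one. RX_RING_LEN and the
 * helper bodies here are assumptions for illustration, not the driver's
 * actual macros.
 */
#include <stdio.h>

#define RX_RING_LEN 16

static unsigned next_rx(unsigned i) { return (i + 1) % RX_RING_LEN; }
static unsigned prev_rx(unsigned i) { return (i + RX_RING_LEN - 1) % RX_RING_LEN; }

int main(void)
{
	unsigned old_tail = 14, tail = old_tail;

	tail = next_rx(tail);		/* consumed one descriptor: 15 */
	tail = next_rx(tail);		/* wraps to 0 */
	printf("EOL set at %u, cleared at %u\n", prev_rx(tail), prev_rx(old_tail));
	return 0;
}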
/*
 * Au1000 receive routine.
 */
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;
	u32 frmlen;

	if (au1000_debug > 5)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4;	/* Remove FCS */
			skb = dev_alloc_skb(frmlen + 2);
			if (skb == NULL) {
				printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				aup->stats.rx_dropped++;
				/* NOTE: this retries the same descriptor without
				 * re-arming it; under sustained memory pressure
				 * the loop never advances */
				continue;
			}
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16 byte IP header align */
			eth_copy_and_sum(skb, (unsigned char *)pDB->vaddr, frmlen, 0);
			skb_put(skb, frmlen);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* pass the packet to upper layers */
		} else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER)
					printk("rx wdog\n");
				if (status & RX_RUNT)
					printk("rx runt\n");
				if (status & RX_OVERLEN)
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		au_sync();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
		dev->last_rx = jiffies;
	}
	return 0;
}
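/*
 * Why drivers reserve 2 bytes before copying a frame in: a standalone check
 * that a 2-byte pad in front of the 14-byte Ethernet header leaves the IP
 * header 4-byte aligned. Pure illustration; the kernel expresses the same
 * idea with the NET_IP_ALIGN constant.
 */
#include <stdio.h>

#define ETH_HDR_LEN 14

int main(void)
{
	unsigned pad;

	for (pad = 0; pad < 4; pad++)
		printf("pad %u -> IP header offset %u (%saligned)\n",
		       pad, pad + ETH_HDR_LEN,
		       ((pad + ETH_HDR_LEN) % 4) ? "mis" : "");
	return 0;
}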
/* Receive data from TTY and forward it to the TCP/IP stack.
   Also handles partial packets. An Ethernet header is prepended to the
   IP datagram before it is passed to the stack. */
void rmnet_netif_rx_cb(struct net_device *dev, const unsigned char *buf, int sz)
{
	struct rmnet_private *p = NULL;
	int ver = 0;
	unsigned short tempval = 0;

	pr_data_info("%s++\n", __FUNCTION__);

	p = netdev_priv(dev);
	if (!p) {
		pr_err("%s: error, tty ldisc not opened\n", __FUNCTION__);
		return;
	}

	while (sz) {
		if (0 == p->len) {
#if defined(__BIG_ENDIAN_BITFIELD)
			ver = buf[0] & 0x0F;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
			ver = (buf[0] & 0xF0) >> 4;
#endif
			if (ver == RMNET_IPV4_VER) {
				p->len = (buf[2] << 8) | buf[3];
			} else if (ver == RMNET_IPV6_VER) {
				p->len = IPV6_HEADER_SZ + ((buf[4] << 8) | buf[5]);
			} else {
				pr_err("%s: wrong IP version: sz = %d\n",
				       __FUNCTION__, sz);
				return;
			}

			if (p->len + RMNET_ETH_HDR_SIZE > RMNET_MTU_SIZE) {
				/* oversized datagram: skip it (assumes the
				 * whole datagram is present in this buffer) */
				p->ptr = NULL;
				sz -= p->len;
				buf += p->len;
				p->len = 0;
				continue;
			} else {
				p->len += RMNET_ETH_HDR_SIZE;
				p->skb = dev_alloc_skb(p->len + NET_IP_ALIGN);
				if (p->skb == NULL) {
					/* TODO: We need to handle this case later */
					pr_err("%s: skbuf alloc failed\n", __FUNCTION__);
					return;
				}
				p->skb->dev = dev;
				skb_reserve(p->skb, NET_IP_ALIGN);
				p->ptr = skb_put(p->skb, p->len);

				/* add an Ethernet header */
				{
					char temp[] = { 0xB6, 0x91, 0x24, 0xa8, 0x14, 0x72,
							0xb6, 0x91, 0x24, 0xa8, 0x14, 0x72,
							0x08, 0x0 };
					struct ethhdr *eth_hdr = (struct ethhdr *)temp;

					if (ver == RMNET_IPV6_VER)
						eth_hdr->h_proto = htons(IPV6_PROTO_TYPE);
					memcpy((void *)eth_hdr->h_dest,
					       (void *)dev->dev_addr,
					       sizeof(eth_hdr->h_dest));
					memcpy((void *)(p->ptr), (void *)eth_hdr,
					       sizeof(struct ethhdr));
				}
			}
		}

		tempval = (sz < (p->len - RMNET_ETH_HDR_SIZE - p->ip_data_in_skb)) ?
			  sz : (p->len - RMNET_ETH_HDR_SIZE - p->ip_data_in_skb);
		memcpy((p->ptr) + RMNET_ETH_HDR_SIZE + p->ip_data_in_skb, buf, tempval);
		p->ip_data_in_skb += tempval;
		sz -= tempval;
		buf += tempval;

		if (p->ip_data_in_skb < (p->len - RMNET_ETH_HDR_SIZE))
			continue;

#ifndef LINUX_HOST
		wake_lock_timeout(&p->rmnet_wake_lock, HZ / 2);
#endif
		p->skb->protocol = eth_type_trans(p->skb, dev);
		p->stats.rx_packets++;
		p->stats.rx_bytes += p->skb->len;
		netif_rx(p->skb);
		p->len = 0;
		p->ip_data_in_skb = 0;
	}
}
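/*
 * Standalone sketch of the framing logic above: the IP version nibble
 * selects how the datagram length is recovered (IPv4 total length at bytes
 * 2-3; IPv6 payload length at bytes 4-5 plus the fixed 40-byte header).
 * This mirrors the driver's RMNET_IPV*_VER handling with the values written
 * out literally for a raw byte buffer.
 */
#include <stdint.h>
#include <stdio.h>

#define IPV6_HEADER_SZ 40

static int ip_datagram_len(const uint8_t *buf)
{
	int ver = buf[0] >> 4;

	if (ver == 4)
		return (buf[2] << 8) | buf[3];
	if (ver == 6)
		return IPV6_HEADER_SZ + ((buf[4] << 8) | buf[5]);
	return -1;	/* unknown version */
}

int main(void)
{
	uint8_t v4[4] = { 0x45, 0x00, 0x00, 0x54 };			/* 84-byte IPv4 */
	uint8_t v6[6] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x28 };	/* 40 + 40 */

	printf("v4: %d bytes, v6: %d bytes\n",
	       ip_datagram_len(v4), ip_datagram_len(v6));
	return 0;
}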
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context, *skb2; struct eth_dev *dev = ep->driver_data; int status = req->status; switch (status) { /* normal completion */ case 0: skb_put(skb, req->actual); if (dev->unwrap) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { status = dev->unwrap(dev->port_usb, skb, &dev->rx_frames); } else { dev_kfree_skb_any(skb); status = -ENOTCONN; } spin_unlock_irqrestore(&dev->lock, flags); } else { skb_queue_tail(&dev->rx_frames, skb); } skb = NULL; skb2 = skb_dequeue(&dev->rx_frames); while (skb2) { if (status < 0 || ETH_HLEN > skb2->len || skb2->len > ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb2->len); dev_kfree_skb_any(skb2); goto next_frame; } #ifdef CONFIG_USB_ETH_PASS_FW ipt_decap_packet(skb2, ipt_cap); #endif skb2->protocol = eth_type_trans(skb2, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb2->len; /* no buffer copies needed, unless hardware can't * use skb buffers. */ status = netif_rx(skb2); next_frame: skb2 = skb_dequeue(&dev->rx_frames); } break; /* software-driven interface shutdown */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; /* for hardware automagic (such as pxa) */ case -ECONNABORTED: /* endpoint reset */ DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: if (skb) dev_kfree_skb_any(skb); goto clean; /* data overrun */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; /* FALLTHROUGH */ default: dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } if (skb) dev_kfree_skb_any(skb); if (!netif_running(dev->net)) { clean: spin_lock(&dev->req_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_lock); req = NULL; } if (req) rx_submit(dev, req, GFP_ATOMIC); }
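/*
 * The dequeue loop above drops anything outside [ETH_HLEN, ETH_FRAME_LEN]
 * or with a failed unwrap status. A tiny standalone version of that bounds
 * check, with the usual 14/1514 values written out; purely illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FRAME_LEN	1514

static bool rx_len_ok(int status, unsigned len)
{
	return status >= 0 && len >= ETH_HLEN && len <= ETH_FRAME_LEN;
}

int main(void)
{
	printf("%d %d %d\n", rx_len_ok(0, 60), rx_len_ok(0, 6),
	       rx_len_ok(-71, 60));	/* 1 0 0 */
	return 0;
}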
/* Incoming data */
static void zd1201_usbrx(struct urb *urb)
{
	struct zd1201 *zd = urb->context;
	int free = 0;
	unsigned char *data = urb->transfer_buffer;
	struct sk_buff *skb;
	unsigned char type;

	if (!zd)
		return;

	switch (urb->status) {
	case -EILSEQ:
	case -ENODEV:
	case -ETIME:
	case -ENOENT:
	case -EPIPE:
	case -EOVERFLOW:
	case -ESHUTDOWN:
		dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n",
			 zd->dev->name, urb->status);
		free = 1;
		goto exit;
	}

	if (urb->status != 0 || urb->actual_length == 0)
		goto resubmit;

	type = data[0];
	if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) {
		memcpy(zd->rxdata, data, urb->actual_length);
		zd->rxlen = urb->actual_length;
		zd->rxdatas = 1;
		wake_up(&zd->rxdataq);
	}
	/* Info frame */
	if (type == ZD1201_PACKET_INQUIRE) {
		int i = 0;
		unsigned short infotype, copylen;
		infotype = le16_to_cpu(*(__le16 *)&data[6]);

		if (infotype == ZD1201_INF_LINKSTATUS) {
			short linkstatus;

			linkstatus = le16_to_cpu(*(__le16 *)&data[8]);
			switch (linkstatus) {
			case 1:
				netif_carrier_on(zd->dev);
				break;
			case 2:
				netif_carrier_off(zd->dev);
				break;
			case 3:
				netif_carrier_off(zd->dev);
				break;
			case 4:
				netif_carrier_on(zd->dev);
				break;
			default:
				netif_carrier_off(zd->dev);
			}
			goto resubmit;
		}
		if (infotype == ZD1201_INF_ASSOCSTATUS) {
			short status = le16_to_cpu(*(__le16 *)(data + 8));
			int event;
			union iwreq_data wrqu;

			switch (status) {
			case ZD1201_ASSOCSTATUS_STAASSOC:
			case ZD1201_ASSOCSTATUS_REASSOC:
				event = IWEVREGISTERED;
				break;
			case ZD1201_ASSOCSTATUS_DISASSOC:
			case ZD1201_ASSOCSTATUS_ASSOCFAIL:
			case ZD1201_ASSOCSTATUS_AUTHFAIL:
			default:
				event = IWEVEXPIRED;
			}
			memcpy(wrqu.addr.sa_data, data + 10, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;

			/* Send event to user space */
			wireless_send_event(zd->dev, event, &wrqu, NULL);
			goto resubmit;
		}
		if (infotype == ZD1201_INF_AUTHREQ) {
			union iwreq_data wrqu;

			memcpy(wrqu.addr.sa_data, data + 8, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;
			/* There isn't an event that truly fits this request.
			   We assume that userspace will be smart enough to
			   see a new station being expired and send back an
			   authstation ioctl to authorize it. */
			wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL);
			goto resubmit;
		}
		/* Other infotypes are handled outside this handler */
		zd->rxlen = 0;
		while (i < urb->actual_length) {
			copylen = le16_to_cpu(*(__le16 *)&data[i + 2]);
			/* Sanity check, sometimes we get junk */
			if (copylen + zd->rxlen > sizeof(zd->rxdata))
				break;
			memcpy(zd->rxdata + zd->rxlen, data + i + 4, copylen);
			zd->rxlen += copylen;
			i += 64;
		}
		if (i >= urb->actual_length) {
			zd->rxdatas = 1;
			wake_up(&zd->rxdataq);
		}
		goto resubmit;
	}
	/* Actual data */
	if (data[urb->actual_length - 1] == ZD1201_PACKET_RXDATA) {
		int datalen = urb->actual_length - 1;
		unsigned short len, fc, seq;

		len = ntohs(*(__be16 *)&data[datalen - 2]);
		if (len > datalen)
			len = datalen;
		fc = le16_to_cpu(*(__le16 *)&data[datalen - 16]);
		seq = le16_to_cpu(*(__le16 *)&data[datalen - 24]);

		if (zd->monitor) {
			if (datalen < 24)
				goto resubmit;
			if (!(skb = dev_alloc_skb(datalen + 24)))
				goto resubmit;

			skb_put_data(skb, &data[datalen - 16], 2);
			skb_put_data(skb, &data[datalen - 2], 2);
			skb_put_data(skb, &data[datalen - 14], 6);
			skb_put_data(skb, &data[datalen - 22], 6);
			skb_put_data(skb, &data[datalen - 8], 6);
			skb_put_data(skb, &data[datalen - 24], 2);
			skb_put_data(skb, data, len);
			skb->protocol = eth_type_trans(skb, zd->dev);
			zd->dev->stats.rx_packets++;
			zd->dev->stats.rx_bytes += skb->len;
			netif_rx(skb);
			goto resubmit;
		}

		if ((seq & IEEE80211_SCTL_FRAG) ||
		    (fc & IEEE80211_FCTL_MOREFRAGS)) {
			struct zd1201_frag *frag = NULL;
			char *ptr;

			if (datalen < 14)
				goto resubmit;
			if ((seq & IEEE80211_SCTL_FRAG) == 0) {
				frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
				if (!frag)
					goto resubmit;
				skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN + 14 + 2);
				if (!skb) {
					kfree(frag);
					goto resubmit;
				}
				frag->skb = skb;
				frag->seq = seq & IEEE80211_SCTL_SEQ;
				skb_reserve(skb, 2);
				skb_put_data(skb, &data[datalen - 14], 12);
				skb_put_data(skb, &data[6], 2);
				skb_put_data(skb, data + 8, len);
				hlist_add_head(&frag->fnode, &zd->fraglist);
				goto resubmit;
			}
			hlist_for_each_entry(frag, &zd->fraglist, fnode)
				if (frag->seq == (seq & IEEE80211_SCTL_SEQ))
					break;
			if (!frag)
				goto resubmit;
			skb = frag->skb;
			ptr = skb_put(skb, len);
			if (ptr)
				memcpy(ptr, data + 8, len);
			if (fc & IEEE80211_FCTL_MOREFRAGS)
				goto resubmit;
			hlist_del_init(&frag->fnode);
			kfree(frag);
		} else {
			if (datalen < 14)
void wilc_wfi_monitor_rx(u8 *buff, u32 size)
{
	u32 header, pkt_offset;
	struct sk_buff *skb = NULL;
	struct wilc_wfi_radiotap_hdr *hdr;
	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;

	if (!wilc_wfi_mon)
		return;

	if (!netif_running(wilc_wfi_mon))
		return;

	/* Get WILC header */
	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
	le32_to_cpus(&header);

	/*
	 * The packet offset field contains the type of the management
	 * frame we are dealing with and its ack status
	 */
	pkt_offset = GET_PKT_OFFSET(header);

	if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
		/* hostapd callback mgmt frame */
		skb = dev_alloc_skb(size + sizeof(*cb_hdr));
		if (!skb)
			return;

		skb_put_data(skb, buff, size);

		cb_hdr = skb_push(skb, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(*cb_hdr));

		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr));
		cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);
		cb_hdr->rate = 5;

		if (pkt_offset & IS_MGMT_STATUS_SUCCES) {
			/* success */
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_RTS;
		} else {
			cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_FAIL;
		}
	} else {
		skb = dev_alloc_skb(size + sizeof(*hdr));
		if (!skb)
			return;

		skb_put_data(skb, buff, size);
		hdr = skb_push(skb, sizeof(*hdr));
		memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
		hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
		hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
		hdr->hdr.it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		hdr->rate = 5;
	}

	skb->dev = wilc_wfi_mon;
	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	netif_rx(skb);
}
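/*
 * The fixed part of a radiotap header, as the code above fills it in:
 * version 0, a little-endian total length, and a 32-bit "present" bitmap
 * (bit 2, IEEE80211_RADIOTAP_RATE, says a 1-byte rate field follows).
 * Standalone sketch; the driver's wilc_wfi_radiotap_hdr wrapper adds the
 * rate byte, and byte-swapping for big-endian hosts is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

struct radiotap_hdr {
	uint8_t  it_version;	/* always 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* little-endian length of the whole header */
	uint32_t it_present;	/* little-endian bitmap of present fields */
} __attribute__((packed));

int main(void)
{
	struct radiotap_hdr h = {
		.it_version = 0,
		.it_len	    = sizeof(h) + 1,	/* header + 1-byte rate field */
		.it_present = 1u << 2,		/* IEEE80211_RADIOTAP_RATE */
	};

	printf("radiotap: %u header bytes, present=0x%08x\n",
	       h.it_len, h.it_present);
	return 0;
}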
/*
 * RX: normal working mode
 */
static void kni_net_rx_normal(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num_rx, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);
	if (num_fq == 0) {
		/* No room on the free_q, bail out */
		return;
	}

	/* Calculate the number of entries to dequeue from rx_q */
	num_rx = min(num_fq, (unsigned)MBUF_BURST_SZ);

	/* Burst dequeue from rx_q */
	num_rx = kni_fifo_get(kni->rx_q, (void **)va, num_rx);
	if (num_rx == 0)
		return;

	/* Transfer received packets to netif */
	for (i = 0; i < num_rx; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		len = kva->data_len;
		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
			   kni->mbuf_kva;

		skb = dev_alloc_skb(len + 2);
		if (!skb) {
			KNI_ERR("Out of mem, dropping pkts\n");
			/* Update statistics */
			kni->stats.rx_dropped++;
		} else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* Call netif interface */
			netif_rx(skb);

			/* Update statistics */
			kni->stats.rx_bytes += len;
			kni->stats.rx_packets++;
		}
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num_rx);
	if (ret != num_rx)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");
}
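/*
 * The clamp that keeps the burst loss-free: never dequeue more packets from
 * rx_q than free_q can absorb afterwards, so the final requeue of consumed
 * buffers cannot fail. A toy standalone version of that sizing decision
 * (the MBUF_BURST_SZ value here is illustrative).
 */
#include <stdio.h>

#define MBUF_BURST_SZ 32

static unsigned burst_size(unsigned rx_avail, unsigned free_room)
{
	unsigned n = free_room < MBUF_BURST_SZ ? free_room : MBUF_BURST_SZ;

	return n < rx_avail ? n : rx_avail;	/* bounded by both queues */
}

int main(void)
{
	printf("%u %u %u\n", burst_size(100, 8), burst_size(3, 64),
	       burst_size(100, 64));	/* 8 3 32 */
	return 0;
}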
/* Packet receive function */ static int sh_eth_rx(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_rxdesc *rxdesc; int entry = mdp->cur_rx % RX_RING_SIZE; int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; struct sk_buff *skb; u16 pkt_len = 0; u32 desc_status, reserve = 0; rxdesc = &mdp->rx_ring[entry]; while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length; if (--boguscnt < 0) break; if (!(desc_status & RDFEND)) mdp->stats.rx_length_errors++; if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { mdp->stats.rx_errors++; if (desc_status & RD_RFS1) mdp->stats.rx_crc_errors++; if (desc_status & RD_RFS2) mdp->stats.rx_frame_errors++; if (desc_status & RD_RFS3) mdp->stats.rx_length_errors++; if (desc_status & RD_RFS4) mdp->stats.rx_length_errors++; if (desc_status & RD_RFS6) mdp->stats.rx_missed_errors++; if (desc_status & RD_RFS10) mdp->stats.rx_over_errors++; } else { swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2); skb = mdp->rx_skbuff[entry]; mdp->rx_skbuff[entry] = NULL; skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); netif_rx(skb); ndev->last_rx = jiffies; mdp->stats.rx_packets++; mdp->stats.rx_bytes += pkt_len; } rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); entry = (++mdp->cur_rx) % RX_RING_SIZE; } /* Refill the Rx ring buffers. */ for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { entry = mdp->dirty_rx % RX_RING_SIZE; rxdesc = &mdp->rx_ring[entry]; /* The size of the buffer is 16 byte boundary. */ rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; if (mdp->rx_skbuff[entry] == NULL) { skb = dev_alloc_skb(mdp->rx_buf_sz); mdp->rx_skbuff[entry] = skb; if (skb == NULL) break; /* Better luck next round. */ skb->dev = ndev; #if defined(CONFIG_CPU_SUBTYPE_SH7763) reserve = SH7763_SKB_ALIGN - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1)); if (reserve) skb_reserve(skb, reserve); #else skb_reserve(skb, RX_OFFSET); #endif skb->ip_summed = CHECKSUM_NONE; rxdesc->addr = (u32)skb->data & ~0x3UL; } if (entry >= RX_RING_SIZE - 1) rxdesc->status |= cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); else rxdesc->status |= cpu_to_edmac(mdp, RD_RACT | RD_RFP); } /* Restart Rx engine if stopped. */ /* If we don't need to check status, don't. -KDU */ if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R)) ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR); return 0; }
/*
 * Function async_bump (buf, len, stats)
 *
 *    Got a frame, make a copy of it, and pass it up the stack! We can try
 *    to inline it since it's only called from state_inside_frame
 */
static inline void
async_bump(struct net_device *dev,
	   struct net_device_stats *stats,
	   iobuff_t *rx_buff)
{
	struct sk_buff *newskb;
	struct sk_buff *dataskb;
	int docopy;

	/* Check if we need to copy the data to a new skb or not.
	 * If the driver doesn't use ZeroCopy Rx, we have to do it.
	 * With ZeroCopy Rx, the rx_buff already points to a valid
	 * skb. But, if the frame is small, it is more efficient to
	 * copy it to save memory (copy will be fast anyway - that's
	 * called Rx-copy-break). Jean II */
	docopy = ((rx_buff->skb == NULL) ||
		  (rx_buff->len < IRDA_RX_COPY_THRESHOLD));

	/* Allocate a new skb */
	newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize);
	if (!newskb) {
		stats->rx_dropped++;
		/* We could deliver the current skb if doing ZeroCopy Rx,
		 * but this would stall the Rx path. Better drop the
		 * packet... Jean II */
		return;
	}

	/* Align IP header to 20 bytes (i.e. increase skb->data)
	 * Note this is only useful with IrLAN, as PPP has a variable
	 * header size (2 or 1 bytes) - Jean II */
	skb_reserve(newskb, 1);

	if (docopy) {
		/* Copy data without CRC (length already checked) */
		skb_copy_to_linear_data(newskb, rx_buff->data,
					rx_buff->len - 2);
		/* Deliver this skb */
		dataskb = newskb;
	} else {
		/* We are using ZeroCopy. Deliver old skb */
		dataskb = rx_buff->skb;
		/* And hook the new skb to the rx_buff */
		rx_buff->skb = newskb;
		rx_buff->head = newskb->data;	/* NOT newskb->head */
	}

	/* Set proper length on skb (without CRC) */
	skb_put(dataskb, rx_buff->len - 2);

	/* Feed it to IrLAP layer */
	dataskb->dev = dev;
	skb_reset_mac_header(dataskb);
	dataskb->protocol = htons(ETH_P_IRDA);

	netif_rx(dataskb);

	stats->rx_packets++;
	stats->rx_bytes += rx_buff->len;

	/* Clean up rx_buff (redundant with async_unwrap_bof() ???) */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
}
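/*
 * The Rx-copy-break decision above in isolation: small frames are copied
 * into a right-sized buffer (the copy is cheap and saves memory), large
 * frames are handed up zero-copy and the ring gets a fresh buffer. The
 * threshold value here is illustrative, not IRDA_RX_COPY_THRESHOLD.
 */
#include <stdbool.h>
#include <stdio.h>

#define RX_COPY_THRESHOLD 256

static bool should_copy(bool zerocopy_capable, unsigned frame_len)
{
	return !zerocopy_capable || frame_len < RX_COPY_THRESHOLD;
}

int main(void)
{
	printf("small frame: copy=%d\n", should_copy(true, 128));	/* 1 */
	printf("large frame: copy=%d\n", should_copy(true, 1400));	/* 0 */
	return 0;
}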
void kni_net_process_rx_packet(struct sk_buff *skb, struct net_device *dev,
			       struct rw_kni_mbuf_metadata *meta_data)
{
	struct kni_dev *kni = netdev_priv(dev);

	skb->dev = dev;
	if (kni->no_pci) {
		skb_reset_mac_header(skb);
		skb->protocol = htons(RW_KNI_VF_GET_MDATA_ENCAP_TYPE(meta_data));
	} else {
		skb->protocol = eth_type_trans(skb, dev);
	}
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* eth_type_trans would have populated the packet type. Store the old
	   packet type and populate the new one depending on the mbuf flags */
	rw_fpath_kni_set_skb_packet_type(meta_data, skb);
	if (RW_KNI_VF_VALID_MDATA_NH_POLICY(meta_data)) {
		int route_lookup;

		BUG_ON(RW_KNI_VF_VALID_MDATA_ENCAP_TYPE(meta_data) == 0);
		switch (skb->protocol) {
		default:
			kni->bad_encap++;
			break;
		case htons(ETH_P_IP):
		{
			uint32_t daddr;

			kni->v4_policy_fwd++;
			memcpy(&daddr, RW_KNI_VF_GET_MDATA_NH_POLICY(meta_data), 4);
			daddr = htonl(daddr);
			route_lookup = ip_route_input_noref(skb, daddr, daddr, 0, dev);
			if (route_lookup) {
				kni->rx_drop_noroute++;
			} else {
				struct neighbour *neigh;
				struct dst_entry *dst = dst_clone(skb_dst(skb));
				struct net_device *neighdev;

				skb_dst_drop(skb);
				neighdev = dst->dev;
				if (likely(neighdev)) {
					rcu_read_lock_bh();
					neigh = __neigh_lookup(&arp_tbl, &daddr,
							       neighdev, 1);
					if (likely(neigh)) {
						kni->forced_arp_sent++;
						__neigh_event_send(neigh, NULL);
						/* release only when the lookup
						 * succeeded; the original
						 * released unconditionally */
						neigh_release(neigh);
					}
					rcu_read_unlock_bh();
				}
				dst_release(dst);
			}
		}
			break;
		case htons(ETH_P_IPV6):
		{
			struct neighbour *neigh = NULL;
			struct dst_entry *dst = NULL;
			int i;
			uint32_t *v6addr;
			struct flowi6 fl6;
			struct rt6_info *rt;
			struct net_device *neighdev;

			kni->v6_policy_fwd++;
			v6addr = (uint32_t *)RW_KNI_VF_GET_MDATA_NH_POLICY(meta_data);
			for (i = 0; i < 4; i++)
				fl6.daddr.s6_addr32[i] = htonl(v6addr[i]);
			rt = rt6_lookup(dev_net(dev), &fl6.daddr, NULL, 0, 0);
			if (!rt) {
				kni->rx_drop_noroute++;
			} else {
				dst = &rt->dst;
				neighdev = dst->dev;
				if (likely(neighdev)) {
					rcu_read_lock_bh();
					neigh = __neigh_lookup(ipv6_stub->nd_tbl,
							       &fl6.daddr.s6_addr32[0],
							       neighdev, 1);
					if (likely(neigh)) {
						kni->forced_ndisc_sent++;
						__neigh_event_send(neigh, NULL);
						/* see IPv4 note above */
						neigh_release(neigh);
					}
					rcu_read_unlock_bh();
				}
				dst_release(dst);
			}
		}
			break;
		}
	}
	/* Call netif interface */
	netif_rx(skb);

	/* Update statistics */
	kni->stats.rx_packets++;
}
void announce_802_3_packet( IN VOID *pAdSrc, IN PNDIS_PACKET pPacket, IN UCHAR OpMode) { RTMP_ADAPTER *pAd; PNDIS_PACKET pRxPkt = pPacket; pAd = (RTMP_ADAPTER *)pAdSrc; ASSERT(pPacket); MEM_DBG_PKT_FREE_INC(pPacket); #ifdef CONFIG_AP_SUPPORT #ifdef APCLI_SUPPORT IF_DEV_CONFIG_OPMODE_ON_AP(pAd) { if (RTMP_MATPktRxNeedConvert(pAd, RtmpOsPktNetDevGet(pRxPkt))) RTMP_MATEngineRxHandle(pAd, pRxPkt, 0); } #endif /* APCLI_SUPPORT */ #endif /* CONFIG_AP_SUPPORT */ /* Push up the protocol stack */ #ifdef CONFIG_AP_SUPPORT #ifdef PLATFORM_BL2348 { extern int (*pToUpperLayerPktSent)(PNDIS_PACKET *pSkb); RtmpOsPktProtocolAssign(pRxPkt); pToUpperLayerPktSent(pRxPkt); return; } #endif /* PLATFORM_BL2348 */ #endif /* CONFIG_AP_SUPPORT */ #ifdef IKANOS_VX_1X0 { IKANOS_DataFrameRx(pAd, pRxPkt); return; } #endif /* IKANOS_VX_1X0 */ #ifdef INF_PPA_SUPPORT if (ppa_hook_directpath_send_fn && pAd->PPAEnable==TRUE ) { RtmpOsPktInfPpaSend(pRxPkt); pRxPkt=NULL; return; } #endif /* INF_PPA_SUPPORT */ { #ifdef CONFIG_RT2880_BRIDGING_ONLY PACKET_CB_ASSIGN(pRxPkt, 22) = 0xa8; #endif #if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE) if(ra_classifier_hook_rx!= NULL) { unsigned int flags; RTMP_IRQ_LOCK(&pAd->page_lock, flags); ra_classifier_hook_rx(pRxPkt, classifier_cur_cycle); RTMP_IRQ_UNLOCK(&pAd->page_lock, flags); } #endif /* CONFIG_RA_CLASSIFIER */ #if !defined(CONFIG_RA_NAT_NONE) #if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) { struct sk_buff *pRxPktb = RTPKT_TO_OSPKT(pRxPkt); FOE_MAGIC_TAG(pRxPktb) = FOE_MAGIC_WLAN; } #endif #ifdef RA_NAT_SUPPORT #if !defined(CONFIG_RA_NAT_NONE) /* bruce+ * ra_sw_nat_hook_rx return 1 --> continue * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx */ if (ra_sw_nat_hook_rx!= NULL) { unsigned int flags; RtmpOsPktProtocolAssign(pRxPkt); RTMP_IRQ_LOCK(&pAd->page_lock, flags); if(ra_sw_nat_hook_rx(pRxPkt)) { netif_rx(pRxPkt); } RTMP_IRQ_UNLOCK(&pAd->page_lock, flags); return; } #endif /* !CONFIG_RA_NAT_NONE */ #endif /* RA_NAT_SUPPORT */ #else { #if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) FOE_AI(((struct sk_buff *)pRxPkt)) = UN_HIT; #endif /* CONFIG_RA_HW_NAT */ } #endif /* CONFIG_RA_NAT_NONE */ } #ifdef CONFIG_AP_SUPPORT #ifdef BG_FT_SUPPORT if (BG_FTPH_PacketFromApHandle(pRxPkt) == 0) return; #endif /* BG_FT_SUPPORT */ #endif /* CONFIG_AP_SUPPORT */ RtmpOsPktProtocolAssign(pRxPkt); RtmpOsPktRcvHandle(pRxPkt); }
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
	struct pdp_info *dev = (struct pdp_info *)net->ml_priv;
#else
	struct pdp_info *dev = (struct pdp_info *)net->priv;
#endif

#ifdef USE_LOOPBACK_PING
	int ret;
	struct sk_buff *skb2;
	struct icmphdr *icmph;
	struct iphdr *iph;
#endif

#ifdef USE_LOOPBACK_PING
	dev->vn_dev.stats.tx_bytes += skb->len;
	dev->vn_dev.stats.tx_packets++;

	skb2 = alloc_skb(skb->len, GFP_ATOMIC);
	if (skb2 == NULL) {
		DPRINTK(1, "alloc_skb() failed\n");
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	memcpy(skb2->data, skb->data, skb->len);
	skb_put(skb2, skb->len);
	dev_kfree_skb_any(skb);

	icmph = (struct icmphdr *)(skb2->data + sizeof(struct iphdr));
	iph = (struct iphdr *)skb2->data;

	icmph->type = ICMP_ECHOREPLY;	/* type is one byte; htons() was wrong here */

	ret = iph->daddr;
	iph->daddr = iph->saddr;
	iph->saddr = ret;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	skb2->dev = net;
	skb2->protocol = __constant_htons(ETH_P_IP);

	/* stats taken before the skb is handed to the stack; the original
	 * read skb->len after skb had been freed */
	dev->vn_dev.stats.rx_packets++;
	dev->vn_dev.stats.rx_bytes += skb2->len;

	netif_rx(skb2);
#else
	if (vnet_start_xmit_flag != 0)
		return NETDEV_TX_BUSY;
	vnet_start_xmit_flag = 1;
	workqueue_data = (unsigned long)skb;
	PREPARE_WORK(&dev->vn_dev.xmit_task, vnet_defer_xmit);
	schedule_work(&dev->vn_dev.xmit_task);
	netif_stop_queue(net);
#endif

	return 0;
}
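/*
 * What ip_fast_csum() is doing above, in portable form: after the
 * source/destination swap the header checksum field is zeroed and
 * recomputed as the 16-bit one's-complement sum over the header
 * (RFC 1071). Self-contained demo over a hand-built 20-byte header;
 * the verify pass yields 0 when the stored checksum is correct.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t ip_checksum(const uint8_t *hdr, int len)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < len; i += 2)
		sum += (hdr[i] << 8) | hdr[i + 1];	/* 16-bit big-endian words */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t h[20] = {
		0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00,		/* checksum field zeroed */
		192, 168, 0, 2,			/* saddr (after the swap) */
		192, 168, 0, 1,			/* daddr (after the swap) */
	};
	uint16_t c = ip_checksum(h, sizeof(h));

	h[10] = c >> 8;
	h[11] = c & 0xff;
	printf("checksum=0x%04x, verify=0x%04x\n", c, ip_checksum(h, sizeof(h)));
	return 0;
}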
static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_err_skb(dev->netdev, &cf); if (skb == NULL) return; if (msg->type == CPC_MSG_TYPE_CAN_STATE) { u8 state = msg->msg.can_state; if (state & SJA1000_SR_BS) { dev->can.state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; can_bus_off(dev->netdev); } else if (state & SJA1000_SR_ES) { dev->can.state = CAN_STATE_ERROR_WARNING; dev->can.can_stats.error_warning++; } else { dev->can.state = CAN_STATE_ERROR_ACTIVE; dev->can.can_stats.error_passive++; } } else if (msg->type == CPC_MSG_TYPE_CAN_FRAME_ERROR) { u8 ecc = msg->msg.error.cc.regs.sja1000.ecc; u8 txerr = msg->msg.error.cc.regs.sja1000.txerr; u8 rxerr = msg->msg.error.cc.regs.sja1000.rxerr; /* bus error interrupt */ dev->can.can_stats.bus_error++; stats->rx_errors++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & SJA1000_ECC_MASK) { case SJA1000_ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case SJA1000_ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case SJA1000_ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & SJA1000_ECC_SEG; break; } /* Error occurred during transmission? */ if ((ecc & SJA1000_ECC_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; if (dev->can.state == CAN_STATE_ERROR_WARNING || dev->can.state == CAN_STATE_ERROR_PASSIVE) { cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } } else if (msg->type == CPC_MSG_TYPE_OVERRUN) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; }
void nas_COMMON_receive(uint16_t dlen,
			void *pdcp_sdu,
			int inst,
			struct classifier_entity *rclass,
			nasRadioBearerId_t rb_id)
{
	//---------------------------------------------------------------------------
	struct sk_buff *skb;
	struct ipversion *ipv;
	struct nas_priv *gpriv = netdev_priv(nasdev[inst]);
	uint32_t odaddr, osaddr;
	//int i;
	unsigned char protocol;
	unsigned char /**addr,*/ *daddr, *saddr, *ifaddr /*,sn*/;
	uint16_t *cksum, check;
	struct iphdr *network_header;

#ifdef NAS_DEBUG_RECEIVE
	printk("NAS_COMMON_RECEIVE: begin RB %d Inst %d Length %d bytes\n", rb_id, inst, dlen);
#endif

	skb = dev_alloc_skb(dlen + 2);

	if (!skb) {
		printk("NAS_COMMON_RECEIVE: low on memory\n");
		++gpriv->stats.rx_dropped;
		return;
	}

	skb_reserve(skb, 2);
	memcpy(skb_put(skb, dlen), pdcp_sdu, dlen);

	skb->dev = nasdev[inst];
	skb_reset_mac_header(skb);

	skb->pkt_type = PACKET_HOST;

	if (rclass->version != NAS_MPLS_VERSION_CODE) {
		/* This is an IP packet */
		skb->ip_summed = CHECKSUM_NONE;

		ipv = (struct ipversion *)skb->data;

		switch (ipv->version) {
		case 6:
#ifdef NAS_DEBUG_RECEIVE
			printk("NAS_COMMON_RECEIVE: receive IPv6 message\n");
#endif
			skb_reset_network_header(skb);
			skb->protocol = htons(ETH_P_IPV6);
			break;

		case 4:
#ifdef NAS_ADDRESS_FIX
			/* Make the third byte of both the source and destination
			   equal to the fourth of the destination */
			daddr = (unsigned char *)&((struct iphdr *)skb->data)->daddr;
			odaddr = ((struct iphdr *)skb->data)->daddr;
			saddr = (unsigned char *)&((struct iphdr *)skb->data)->saddr;
			osaddr = ((struct iphdr *)skb->data)->saddr;

			if (daddr[0] == saddr[0]) { // same network
				daddr[2] = daddr[3]; // set third byte of destination to that of local machine so that local IP stack accepts the packet
				saddr[2] = daddr[3]; // set third byte of source to that of local machine so that local IP stack accepts the packet
			} else {
				// get the 3rd byte from device address in net_device structure
				ifaddr = (unsigned char *)(&(((struct in_device *)((nasdev[inst])->ip_ptr))->ifa_list->ifa_local));

				if (saddr[0] == ifaddr[0]) { // source is in same network as local machine
					daddr[0] += saddr[3]; // fix address of remote destination to undo change at source
					saddr[2] = ifaddr[2]; // set third byte to that of local machine so that local IP stack accepts the packet
				} else { // source is remote machine from outside network
					saddr[0] -= daddr[3]; // fix address of remote source to be understood by destination
					daddr[2] = daddr[3]; // fix 3rd byte of local address to be understood by IP stack of destination
				}
			}
#endif //NAS_ADDRESS_FIX
#ifdef NAS_DEBUG_RECEIVE
			addr = (unsigned char *)&((struct iphdr *)skb->data)->saddr;
			if (addr)
				printk("[NAS][COMMON][RECEIVE] Source %d.%d.%d.%d\n", addr[0], addr[1], addr[2], addr[3]);
			addr = (unsigned char *)&((struct iphdr *)skb->data)->daddr;
			if (addr)
				printk("[NAS][COMMON][RECEIVE] Dest %d.%d.%d.%d\n", addr[0], addr[1], addr[2], addr[3]);
			printk("[NAS][COMMON][RECEIVE] protocol %d\n", ((struct iphdr *)skb->data)->protocol);
#endif
			skb_reset_network_header(skb);
			network_header = (struct iphdr *)skb_network_header(skb);
			protocol = network_header->protocol;

#ifdef NAS_DEBUG_RECEIVE
			switch (protocol) {
			case IPPROTO_IP:
				printk("[NAS][COMMON][RECEIVE] Received Raw IPv4 packet\n");
				break;
			case IPPROTO_IPV6:
				printk("[NAS][COMMON][RECEIVE] Received Raw IPv6 packet\n");
				break;
			case IPPROTO_ICMP:
				printk("[NAS][COMMON][RECEIVE] Received Raw ICMP packet\n");
				break;
			case IPPROTO_TCP:
				printk("[NAS][COMMON][RECEIVE] Received TCP packet\n");
				break;
			case IPPROTO_UDP:
				printk("[NAS][COMMON][RECEIVE] Received UDP packet\n");
				break;
			default:
				break;
			}
#endif

#ifdef NAS_ADDRESS_FIX
#ifdef NAS_DEBUG_RECEIVE
			printk("NAS_COMMON_RECEIVE: dumping the packet before the csum recalculation (len %d)\n", skb->len);
			for (i = 0; i < skb->len; i++)
				printk("%2x ", ((unsigned char *)(skb->data))[i]);
			printk("\n");
#endif //NAS_DEBUG_RECEIVE

			network_header->check = 0;
			network_header->check = ip_fast_csum((unsigned char *)network_header, network_header->ihl);

#ifdef NAS_DEBUG_RECEIVE
			printk("[NAS][COMMON][RECEIVE] IP Fast Checksum %x \n", network_header->check);
#endif

			switch (protocol) {
			case IPPROTO_TCP:
				cksum = (uint16_t *)&(((struct tcphdr *)((char *)network_header + (network_header->ihl << 2)))->check);
#ifdef NAS_DEBUG_RECEIVE
				printk("[NAS][COMMON] Inst %d TCP packet calculated CS %x, CS = %x (before), SA (%x)%x, DA (%x)%x\n",
				       inst, network_header->check, *cksum,
				       osaddr, ((struct iphdr *)skb->data)->saddr,
				       odaddr, ((struct iphdr *)skb->data)->daddr);
#endif
				check = csum_tcpudp_magic(((struct iphdr *)skb->data)->saddr,
							  ((struct iphdr *)skb->data)->daddr,
							  0, 0, ~(*cksum));
				*cksum = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0, ~check);
#ifdef NAS_DEBUG_RECEIVE
				printk("[NAS][COMMON] Inst %d TCP packet NEW CS %x\n", inst, *cksum);
#endif
				break;

			case IPPROTO_UDP:
				cksum = (uint16_t *)&(((struct udphdr *)((char *)network_header + (network_header->ihl << 2)))->check);
#ifdef NAS_DEBUG_RECEIVE
				printk("[NAS][COMMON] Inst %d UDP packet CS = %x (before), SA (%x)%x, DA (%x)%x\n",
				       inst, *cksum,
				       osaddr, ((struct iphdr *)skb->data)->saddr,
				       odaddr, ((struct iphdr *)skb->data)->daddr);
#endif
				check = csum_tcpudp_magic(((struct iphdr *)skb->data)->saddr,
							  ((struct iphdr *)skb->data)->daddr,
							  0, 0, ~(*cksum));
				*cksum = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0, ~check);
#ifdef NAS_DEBUG_RECEIVE
				printk("[NAS][COMMON] Inst %d UDP packet NEW CS %x\n", inst, *cksum);
#endif
				break;

			default:
				break;
			}
#endif //NAS_ADDRESS_FIX
			skb->protocol = htons(ETH_P_IP);
			break;

		default:
			printk("NAS_COMMON_RECEIVE: begin RB %d Inst %d Length %d bytes\n", rb_id, inst, dlen);
			printk("[NAS][COMMON] Inst %d: receive unknown message (version=%d)\n", inst, ipv->version);
		}
	} else { // This is an MPLS packet
#ifdef NAS_DEBUG_RECEIVE
		printk("NAS_COMMON_RECEIVE: Received an MPLS packet on RB %d\n", rb_id);
#endif
		skb->protocol = htons(ETH_P_MPLS_UC);
	}

	++gpriv->stats.rx_packets;
	gpriv->stats.rx_bytes += dlen;

#ifdef NAS_DEBUG_RECEIVE
	printk("NAS_COMMON_RECEIVE: sending packet of size %d to kernel\n", skb->len);
	for (i = 0; i < skb->len; i++)
		printk("%2x ", ((unsigned char *)(skb->data))[i]);
	printk("\n");
#endif //NAS_DEBUG_RECEIVE

	netif_rx(skb);
#ifdef NAS_DEBUG_RECEIVE
	printk("NAS_COMMON_RECEIVE: end\n");
#endif
}
static inline int i596_rx(struct device *dev) { struct i596_private *lp = (struct i596_private *)dev->priv; int frames = 0; if (i596_debug > 3) printk ("i596_rx()\n"); while ((lp->scb.rfd->stat) & STAT_C) { if (i596_debug >2) print_eth(lp->scb.rfd->data); if ((lp->scb.rfd->stat) & STAT_OK) { /* a good frame */ int pkt_len = lp->scb.rfd->count & 0x3fff; struct sk_buff *skb = dev_alloc_skb(pkt_len); frames++; if (skb == NULL) { printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); lp->stats.rx_dropped++; break; } skb->dev = dev; memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); lp->stats.rx_packets++; if (i596_debug > 4) print_eth(skb->data); } else { lp->stats.rx_errors++; if ((lp->scb.rfd->stat) & 0x0001) lp->stats.collisions++; if ((lp->scb.rfd->stat) & 0x0080) lp->stats.rx_length_errors++; if ((lp->scb.rfd->stat) & 0x0100) lp->stats.rx_over_errors++; if ((lp->scb.rfd->stat) & 0x0200) lp->stats.rx_fifo_errors++; if ((lp->scb.rfd->stat) & 0x0400) lp->stats.rx_frame_errors++; if ((lp->scb.rfd->stat) & 0x0800) lp->stats.rx_crc_errors++; if ((lp->scb.rfd->stat) & 0x1000) lp->stats.rx_length_errors++; } lp->scb.rfd->stat = 0; lp->rx_tail->cmd = 0; lp->rx_tail = lp->scb.rfd; lp->scb.rfd = lp->scb.rfd->next; lp->rx_tail->count = 0; lp->rx_tail->cmd = CMD_EOL; } if (i596_debug > 3) printk ("frames %d\n", frames); return 0; }
static void rx_complete(struct urb *req) { struct net_device *dev = req->context; struct usbpn_dev *pnd = netdev_priv(dev); struct page *page = virt_to_page(req->transfer_buffer); struct sk_buff *skb; unsigned long flags; int status = req->status; switch (status) { case 0: spin_lock_irqsave(&pnd->rx_lock, flags); skb = pnd->rx_skb; if (!skb) { skb = pnd->rx_skb = netdev_alloc_skb(dev, 12); if (likely(skb)) { /* Can't use pskb_pull() on page in IRQ */ memcpy(skb_put(skb, 1), page_address(page), 1); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 1, req->actual_length, PAGE_SIZE); page = NULL; } } else { skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, req->actual_length, PAGE_SIZE); page = NULL; } if (req->actual_length < PAGE_SIZE) pnd->rx_skb = NULL; /* Last fragment */ else skb = NULL; spin_unlock_irqrestore(&pnd->rx_lock, flags); if (skb) { skb->protocol = htons(ETH_P_PHONET); skb_reset_mac_header(skb); __skb_pull(skb, 1); skb->dev = dev; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; netif_rx(skb); } goto resubmit; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: req = NULL; break; case -EOVERFLOW: dev->stats.rx_over_errors++; dev_dbg(&dev->dev, "RX overflow\n"); break; case -EILSEQ: dev->stats.rx_crc_errors++; break; } dev->stats.rx_errors++; resubmit: if (page) put_page(page); if (req) rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD); }
int
ethernet_demuxer (mach_msg_header_t *inp,
		  mach_msg_header_t *outp)
{
  struct net_rcv_msg *msg = (struct net_rcv_msg *) inp;
  struct sk_buff *skb;
  int datalen;
  struct ether_device *edev;
  struct device *dev = 0;
  mach_port_t local_port;

  if (inp->msgh_id != NET_RCV_MSG_ID)
    return 0;

  if (MACH_MSGH_BITS_LOCAL (inp->msgh_bits) ==
      MACH_MSG_TYPE_PROTECTED_PAYLOAD)
    {
      struct port_info *pi = ports_lookup_payload (NULL,
						   inp->msgh_protected_payload,
						   NULL);
      if (pi)
	{
	  local_port = pi->port_right;
	  ports_port_deref (pi);
	}
      else
	local_port = MACH_PORT_NULL;
    }
  else
    local_port = inp->msgh_local_port;

  for (edev = ether_dev; edev; edev = edev->next)
    if (local_port == edev->readptname)
      dev = &edev->dev;

  if (! dev)
    {
      if (inp->msgh_remote_port != MACH_PORT_NULL)
	mach_port_deallocate (mach_task_self (), inp->msgh_remote_port);
      return 1;
    }

  datalen = ETH_HLEN
    + msg->packet_type.msgt_number - sizeof (struct packet_header);

  pthread_mutex_lock (&net_bh_lock);
  skb = alloc_skb (datalen, GFP_ATOMIC);
  if (! skb)
    {
      /* Drop the frame if memory is tight; the original did not check
	 for allocation failure.  */
      pthread_mutex_unlock (&net_bh_lock);
      return 1;
    }
  skb_put (skb, datalen);
  skb->dev = dev;

  /* Copy the two parts of the frame into the buffer. */
  memcpy (skb->data, msg->header, ETH_HLEN);
  memcpy (skb->data + ETH_HLEN,
	  msg->packet + sizeof (struct packet_header),
	  datalen - ETH_HLEN);

  /* Drop it on the queue. */
  skb->protocol = eth_type_trans (skb, dev);
  netif_rx (skb);
  pthread_mutex_unlock (&net_bh_lock);

  return 1;
}
static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	u32 rtap_len;
	int ret = 0;
	struct wilc_wfi_mon_priv *mon_priv;
	struct sk_buff *skb2;
	struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
	u8 srcadd[6];	/* the snippet used these buffers without */
	u8 bssid[6];	/* declaring them anywhere; declared locally here */

	if (!wilc_wfi_mon)
		return -EFAULT;

	mon_priv = netdev_priv(wilc_wfi_mon);
	if (!mon_priv)
		return -EFAULT;

	rtap_len = ieee80211_get_radiotap_len(skb->data);
	if (skb->len < rtap_len)
		return -1;

	skb_pull(skb, rtap_len);

	if (skb->data[0] == 0xc0 && is_broadcast_ether_addr(&skb->data[4])) {
		skb2 = dev_alloc_skb(skb->len + sizeof(*cb_hdr));
		if (!skb2)
			return -ENOMEM;

		skb_put_data(skb2, skb->data, skb->len);

		cb_hdr = skb_push(skb2, sizeof(*cb_hdr));
		memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));

		cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
		cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr));
		cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);
		cb_hdr->rate = 5;
		cb_hdr->tx_flags = 0x0004;

		skb2->dev = wilc_wfi_mon;
		skb_reset_mac_header(skb2);
		skb2->ip_summed = CHECKSUM_UNNECESSARY;
		skb2->pkt_type = PACKET_OTHERHOST;
		skb2->protocol = htons(ETH_P_802_2);
		memset(skb2->cb, 0, sizeof(skb2->cb));

		netif_rx(skb2);

		return 0;
	}
	skb->dev = mon_priv->real_ndev;

	memcpy(srcadd, &skb->data[10], 6);
	memcpy(bssid, &skb->data[16], 6);
	/*
	 * Identify if data or mgmt packet: if the source address and bssid
	 * fields are equal, send it to the mgmt frames handler
	 */
	if (!(memcmp(srcadd, bssid, 6))) {
		ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
		if (ret)
			netdev_err(dev, "fail to mgmt tx\n");
		dev_kfree_skb(skb);
	} else {
		ret = wilc_mac_xmit(skb, mon_priv->real_ndev);
	}

	return ret;
}
/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 void **data,
			 struct ndis_tcp_ip_checksum_info *csum_info,
			 struct vmbus_channel *channel,
			 u16 vlan_tci)
{
	struct net_device *net = hv_get_drvdata(device_obj);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct sk_buff *skb;
	struct sk_buff *vf_skb;
	struct netvsc_stats *rx_stats;
	struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
	u32 bytes_recvd = packet->total_data_buflen;
	int ret = 0;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	if (READ_ONCE(netvsc_dev->vf_inject)) {
		atomic_inc(&netvsc_dev->vf_use_cnt);
		if (!READ_ONCE(netvsc_dev->vf_inject)) {
			/*
			 * We raced; just move on.
			 */
			atomic_dec(&netvsc_dev->vf_use_cnt);
			goto vf_injection_done;
		}

		/*
		 * Inject this packet into the VF interface.
		 * On Hyper-V, multicast and broadcast packets are only
		 * delivered on the synthetic interface (after subjecting
		 * these to policy filters on the host). Deliver these via
		 * the VF interface in the guest.
		 */
		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev,
					       packet, csum_info, *data,
					       vlan_tci);
		if (vf_skb != NULL) {
			++netvsc_dev->vf_netdev->stats.rx_packets;
			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
			netif_receive_skb(vf_skb);
		} else {
			++net->stats.rx_dropped;
			ret = NVSP_STAT_FAIL;
		}
		atomic_dec(&netvsc_dev->vf_use_cnt);
		return ret;
	}

vf_injection_done:
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}
	skb_record_rx_queue(skb, channel->offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}
static void ether00_int(int irq_num, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct net_priv *priv = dev->priv;

	unsigned int interruptValue;
	int enable_tx = 0;
	struct tx_fda_ent *fda_ptr;
	struct sk_buff *skb;

	interruptValue = readl(ETHER_INT_SRC(dev->base_addr));

	if (!(readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_IRQ_MSK)) {
		return;		/* Interrupt wasn't caused by us!! */
	}

	if (readl(ETHER_INT_SRC(dev->base_addr)) &
	    (ETHER_INT_SRC_INTMACRX_MSK |
	     ETHER_INT_SRC_FDAEX_MSK |
	     ETHER_INT_SRC_BLEX_MSK)) {
		struct rx_blist_ent *blist_ent_ptr;
		struct rx_fda_ent *fda_ent_ptr;
		struct sk_buff *skb;

		fda_ent_ptr = priv->rx_fda_ptr;
		spin_lock(&priv->dma_lock);

		while (fda_ent_ptr < (priv->rx_fda_ptr + RX_NUM_FDESC)) {
			int result;

			if (!(fda_ent_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)) {
				/* This frame is ready for processing */
				/* find the corresponding buffer in the bufferlist */
				blist_ent_ptr = priv->rx_blist_vp + fda_ent_ptr->bd.BDStat;
				skb = (struct sk_buff *)blist_ent_ptr->fd.FDSystem;

				/* Pass this skb up the stack */
				skb->dev = dev;
				skb_put(skb, fda_ent_ptr->fd.FDLength);
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				result = netif_rx(skb);

				/* Update statistics */
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += fda_ent_ptr->fd.FDLength;

				/* Free the FDA entry */
				fda_ent_ptr->bd.BDStat = 0xff;
				fda_ent_ptr->fd.FDCtl = FDCTL_COWNSFD_MSK;

				/* Allocate a new skb and point the bd entry to it */
				blist_ent_ptr->fd.FDSystem = 0;
				skb = dev_alloc_skb(PKT_BUF_SZ);
				if (skb) {
					setup_blist_entry(skb, blist_ent_ptr);
				} else if (!priv->memupdate_scheduled) {
					int tmp;

					/* There are no buffers at the moment, so schedule */
					/* the background task to sort this out */
					schedule_task(&priv->tq_memupdate);
					priv->memupdate_scheduled = 1;
					printk(KERN_DEBUG "%s:No buffers", dev->name);

					/* If this interrupt was due to a lack of buffers then
					 * we'd better stop the receiver too */
					if (interruptValue & ETHER_INT_SRC_BLEX_MSK) {
						priv->rx_disabled = 1;
						tmp = readl(ETHER_INT_SRC(dev->base_addr));
						writel(tmp & ~ETHER_RX_CTL_RXEN_MSK,
						       ETHER_RX_CTL(dev->base_addr));
						printk(KERN_DEBUG "%s:Halting rx", dev->name);
					}
				}
			}
			fda_ent_ptr++;
		}
		spin_unlock(&priv->dma_lock);

		/* Clear the interrupts */
		writel(ETHER_INT_SRC_INTMACRX_MSK |
		       ETHER_INT_SRC_FDAEX_MSK |
		       ETHER_INT_SRC_BLEX_MSK,
		       ETHER_INT_SRC(dev->base_addr));
	}

	if (readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_INTMACTX_MSK) {
		/* Transmit interrupt */
		fda_ptr = (struct tx_fda_ent *)priv->tx_tail;

		/* free up all completed frames */
		while (!(FDCTL_COWNSFD_MSK & fda_ptr->fd.FDCtl) &&
		       fda_ptr->fd.FDSystem) {
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += fda_ptr->bd.BuffLength;
			skb = (struct sk_buff *)fda_ptr->fd.FDSystem;
			dev_kfree_skb_irq(skb);
			fda_ptr->fd.FDSystem = 0;
			fda_ptr->fd.FDStat = 0;
			fda_ptr->fd.FDCtl = 0;
			fda_ptr = (struct tx_fda_ent *)__dma_va(fda_ptr->fd.FDNext);
			enable_tx = 1;
		}
		priv->tx_tail = (unsigned int)fda_ptr;

		if (priv->queue_stopped && enable_tx) {
			priv->queue_stopped = 0;
			netif_wake_queue(dev);
		}

		/* Clear the interrupt */
		writel(ETHER_INT_SRC_INTMACTX_MSK, ETHER_INT_SRC(dev->base_addr));
	}

	if (readl(ETHER_INT_SRC(dev->base_addr)) &
	    (ETHER_INT_SRC_SWINT_MSK |
	     ETHER_INT_SRC_INTEARNOT_MSK |
	     ETHER_INT_SRC_INTLINK_MSK |
	     ETHER_INT_SRC_INTEXBD_MSK |
	     ETHER_INT_SRC_INTTXCTLCMP_MSK)) {
		/*
		 * Not using any of these so they shouldn't happen
		 *
		 * In the case of INTEXBD - if you allocate more
		 * than 28 descriptors you may need to think about this
		 */
		printk("Not using this interrupt\n");
	}

	if (readl(ETHER_INT_SRC(dev->base_addr)) &
	    (ETHER_INT_SRC_INTSBUS_MSK |
	     ETHER_INT_SRC_INTNRABT_MSK |
	     ETHER_INT_SRC_DMPARERR_MSK)) {
		/*
		 * Hardware errors: we can either ignore them and hope they go
		 * away, or reset the device. Try the first for now to see if
		 * they happen.
		 */
		printk("Hardware error\n");
	}
}
static int ipgre_rcv(struct sk_buff *skb) { struct iphdr *iph; u8 *h; __be16 flags; __sum16 csum = 0; __be32 key = 0; u32 seqno = 0; struct ip_tunnel *tunnel; int offset = 4; __be16 gre_proto; unsigned int len; if (!pskb_may_pull(skb, 16)) goto drop_nolock; iph = ip_hdr(skb); h = skb->data; flags = *(__be16*)h; if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { /* - Version must be 0. - We do not support routing headers. */ if (flags&(GRE_VERSION|GRE_ROUTING)) goto drop_nolock; if (flags&GRE_CSUM) { switch (skb->ip_summed) { case CHECKSUM_COMPLETE: csum = csum_fold(skb->csum); if (!csum) break; /* fall through */ case CHECKSUM_NONE: skb->csum = 0; csum = __skb_checksum_complete(skb); skb->ip_summed = CHECKSUM_COMPLETE; } offset += 4; } if (flags&GRE_KEY) { key = *(__be32*)(h + offset); offset += 4; } if (flags&GRE_SEQ) { seqno = ntohl(*(__be32*)(h + offset)); offset += 4; } } gre_proto = *(__be16 *)(h + 2); read_lock(&ipgre_lock); if ((tunnel = ipgre_tunnel_lookup(skb->dev, iph->saddr, iph->daddr, key, gre_proto))) { struct net_device_stats *stats = &tunnel->dev->stats; secpath_reset(skb); skb->protocol = gre_proto; /* WCCP version 1 and 2 protocol decoding. * - Change protocol to IP * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header */ if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { skb->protocol = htons(ETH_P_IP); if ((*(h + offset) & 0xF0) != 0x40) offset += 4; } skb->mac_header = skb->network_header; __pskb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); skb->pkt_type = PACKET_HOST; #ifdef CONFIG_NET_IPGRE_BROADCAST if (ipv4_is_multicast(iph->daddr)) { /* Looped back packet, drop it! */ if (skb_rtable(skb)->fl.iif == 0) goto drop; stats->multicast++; skb->pkt_type = PACKET_BROADCAST; } #endif if (((flags&GRE_CSUM) && csum) || (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { stats->rx_crc_errors++; stats->rx_errors++; goto drop; } if (tunnel->parms.i_flags&GRE_SEQ) { if (!(flags&GRE_SEQ) || (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) { stats->rx_fifo_errors++; stats->rx_errors++; goto drop; } tunnel->i_seqno = seqno + 1; } len = skb->len; /* Warning: All skb pointers will be invalidated! */ if (tunnel->dev->type == ARPHRD_ETHER) { if (!pskb_may_pull(skb, ETH_HLEN)) { stats->rx_length_errors++; stats->rx_errors++; goto drop; } iph = ip_hdr(skb); skb->protocol = eth_type_trans(skb, tunnel->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); } stats->rx_packets++; stats->rx_bytes += len; skb->dev = tunnel->dev; skb_dst_drop(skb); nf_reset(skb); skb_reset_network_header(skb); ipgre_ecn_decapsulate(iph, skb); netif_rx(skb); read_unlock(&ipgre_lock); return(0); } icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); drop: read_unlock(&ipgre_lock); drop_nolock: kfree_skb(skb); return(0); }
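/*
 * The optional-field walk at the top of ipgre_rcv() in isolation: the base
 * GRE header is 4 bytes, and each of the CSUM (checksum + reserved), KEY
 * and SEQ flags adds another 4 before the payload starts. Standalone sketch
 * with the RFC 2890 flag bits written out literally in host byte order.
 */
#include <stdint.h>
#include <stdio.h>

#define GRE_CSUM 0x8000
#define GRE_KEY  0x2000
#define GRE_SEQ  0x1000

static unsigned gre_header_len(uint16_t flags)
{
	unsigned len = 4;	/* flags + protocol */

	if (flags & GRE_CSUM)
		len += 4;	/* checksum + reserved */
	if (flags & GRE_KEY)
		len += 4;
	if (flags & GRE_SEQ)
		len += 4;
	return len;
}

int main(void)
{
	printf("plain=%u key=%u csum+key+seq=%u\n",
	       gre_header_len(0),
	       gre_header_len(GRE_KEY),
	       gre_header_len(GRE_CSUM | GRE_KEY | GRE_SEQ));	/* 4 8 16 */
	return 0;
}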
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
		    ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */
	skb_reset_transport_header(skb);

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto free;

	/*
	 *	Parse the address header.
	 */
	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
		goto free;

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)	/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2);	/* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
			goto free;

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb, 2);	/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb, 2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;

		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device ?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore them. For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
		goto free;

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one otherwise we free it immediately. This
		 *	routine itself wakes the user context layers so we do
		 *	no further work
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		goto free;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine)
			goto free;

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			goto free;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else {
free:
		kfree_skb(skb);
	}

	return 0;
}
/* receive a single frame and assemble datagram * (this is the heart of the interrupt routine) */ static inline int sb1000_rx(struct net_device *dev) { #define FRAMESIZE 184 unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id; short dlen; int ioaddr, ns; unsigned int skbsize; struct sk_buff *skb; struct sb1000_private *lp = (struct sb1000_private *)dev->priv; struct net_device_stats *stats = &lp->stats; /* SB1000 frame constants */ const int FrameSize = FRAMESIZE; const int NewDatagramHeaderSkip = 8; const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18; const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize; const int ContDatagramHeaderSkip = 7; const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1; const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize; const int TrailerSize = 4; ioaddr = dev->base_addr; insw(ioaddr, (unsigned short*) st, 1); #ifdef XXXDEBUG printk("cm0: received: %02x %02x\n", st[0], st[1]); #endif /* XXXDEBUG */ lp->rx_frames++; /* decide if it is a good or bad frame */ for (ns = 0; ns < NPIDS; ns++) { session_id = lp->rx_session_id[ns]; frame_id = lp->rx_frame_id[ns]; if (st[0] == session_id) { if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) { goto good_frame; } else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) { goto skipped_frame; } else { goto bad_frame; } } else if (st[0] == (session_id | 0x40)) { if ((st[1] & 0xf0) == 0x30) { goto skipped_frame; } else { goto bad_frame; } } } goto bad_frame; skipped_frame: stats->rx_frame_errors++; skb = lp->rx_skb[ns]; if (sb1000_debug > 1) printk(KERN_WARNING "%s: missing frame(s): got %02x %02x " "expecting %02x %02x\n", dev->name, st[0], st[1], skb ? session_id : session_id | 0x40, frame_id); if (skb) { dev_kfree_skb(skb); skb = 0; } good_frame: lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f); /* new datagram */ if (st[0] & 0x40) { /* get data length */ insw(ioaddr, buffer, NewDatagramHeaderSize / 2); #ifdef XXXDEBUG printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]); #endif /* XXXDEBUG */ if (buffer[0] != NewDatagramHeaderSkip) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: new datagram header skip error: " "got %02x expecting %02x\n", dev->name, buffer[0], NewDatagramHeaderSkip); stats->rx_length_errors++; insw(ioaddr, buffer, NewDatagramDataSize / 2); goto bad_frame_next; } dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 | buffer[NewDatagramHeaderSkip + 4]) - 17; if (dlen > SB1000_MRU) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: datagram length (%d) greater " "than MRU (%d)\n", dev->name, dlen, SB1000_MRU); stats->rx_length_errors++; insw(ioaddr, buffer, NewDatagramDataSize / 2); goto bad_frame_next; } lp->rx_dlen[ns] = dlen; /* compute size to allocate for datagram */ skbsize = dlen + FrameSize; if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: can't allocate %d bytes long " "skbuff\n", dev->name, skbsize); stats->rx_dropped++; insw(ioaddr, buffer, NewDatagramDataSize / 2); goto dropped_frame; } skb->dev = dev; skb->mac.raw = skb->data; skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; insw(ioaddr, skb_put(skb, NewDatagramDataSize), NewDatagramDataSize / 2); lp->rx_skb[ns] = skb; } else { /* continuation of previous datagram */ insw(ioaddr, buffer, ContDatagramHeaderSize / 2); if (buffer[0] != ContDatagramHeaderSkip) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: cont datagram header skip error: " "got 
%02x expecting %02x\n", dev->name, buffer[0], ContDatagramHeaderSkip); stats->rx_length_errors++; insw(ioaddr, buffer, ContDatagramDataSize / 2); goto bad_frame_next; } skb = lp->rx_skb[ns]; insw(ioaddr, skb_put(skb, ContDatagramDataSize), ContDatagramDataSize / 2); dlen = lp->rx_dlen[ns]; } if (skb->len < dlen + TrailerSize) { lp->rx_session_id[ns] &= ~0x40; return 0; } /* datagram completed: send to upper level */ skb_trim(skb, dlen); netif_rx(skb); dev->last_rx = jiffies; stats->rx_bytes+=dlen; stats->rx_packets++; lp->rx_skb[ns] = 0; lp->rx_session_id[ns] |= 0x40; return 0; bad_frame: insw(ioaddr, buffer, FrameSize / 2); if (sb1000_debug > 1) printk(KERN_WARNING "%s: frame error: got %02x %02x\n", dev->name, st[0], st[1]); stats->rx_frame_errors++; bad_frame_next: if (sb1000_debug > 2) sb1000_print_status_buffer(dev->name, st, buffer, FrameSize); dropped_frame: stats->rx_errors++; if (ns < NPIDS) { if ((skb = lp->rx_skb[ns])) { dev_kfree_skb(skb); lp->rx_skb[ns] = 0; } lp->rx_session_id[ns] |= 0x40; } return -1; }
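/*
 * A minimal sketch of the reassembly scheme used by sb1000_rx() above:
 * a "new datagram" frame announces the total length, continuation
 * frames append payload, and the datagram is delivered (trimmed to the
 * announced length) once enough bytes have accumulated. The struct and
 * sizes are illustrative, not the driver's.
 */
#include <string.h>
#include <stdbool.h>

#define REASM_MAX 2048

struct reasm {
	unsigned char buf[REASM_MAX];
	int have; /* bytes accumulated so far */
	int want; /* total length announced by the first frame */
};

/* first frame of a datagram: record the expected length */
static bool reasm_new(struct reasm *r, int want,
		      const unsigned char *payload, int plen)
{
	if (want <= 0 || want > REASM_MAX || plen > REASM_MAX)
		return false;
	r->want = want;
	memcpy(r->buf, payload, plen);
	r->have = plen;
	return true;
}

/* continuation frame: append; returns true when the datagram is complete */
static bool reasm_cont(struct reasm *r, const unsigned char *payload, int plen)
{
	if (r->have + plen > REASM_MAX)
		return false; /* overrun: caller should drop the assembly */
	memcpy(r->buf + r->have, payload, plen);
	r->have += plen;
	return r->have >= r->want; /* deliver r->buf[0..want) when true */
}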
/* Receive a packet and pass it to the upper layer */ static void dmfe_packet_receive(struct net_device *dev) { board_info_t *db = (board_info_t *)dev->priv; struct sk_buff *skb; u8 rxbyte; u16 i, GoodPacket, tmplen = 0, MDRAH, MDRAL; u32 tmpdata; rx_t rx; u16 *ptr = (u16 *)&rx; u8 *rdptr; DMFE_DBUG(0, "dmfe_packet_receive()", 0); db->cont_rx_pkt_cnt=0; do { /* store the value of the Memory Data Read address register */ MDRAH=ior(db, DM9KS_MDRAH); MDRAL=ior(db, DM9KS_MDRAL); ior(db, DM9KS_MRCMDX); /* Dummy read */ rxbyte = inb(db->io_data); /* Get the most up-to-date data */ #ifdef CHECKSUM if (rxbyte&0x2) /* check RX byte */ { printk("dm9ks: abnormal!\n"); dmfe_reset(dev); break; } else { if (!(rxbyte&0x1)) break; } #else if (rxbyte==0) break; if (rxbyte>1) { printk("dm9ks: Rxbyte error!\n"); dmfe_reset(dev); break; } #endif /* A packet is ready now; get status/length */ GoodPacket = TRUE; outb(DM9KS_MRCMD, db->io_addr); /* Read packet status & length */ switch (db->io_mode) { case DM9KS_BYTE_MODE: *ptr = inb(db->io_data) + (inb(db->io_data) << 8); *(ptr+1) = inb(db->io_data) + (inb(db->io_data) << 8); break; case DM9KS_WORD_MODE: *ptr = inw(db->io_data); *(ptr+1) = inw(db->io_data); break; case DM9KS_DWORD_MODE: tmpdata = inl(db->io_data); *ptr = tmpdata; *(ptr+1) = tmpdata >> 16; break; default: break; } /* Packet status check */ if (rx.desc.status & 0xbf) { GoodPacket = FALSE; if (rx.desc.status & 0x01) { db->stats.rx_fifo_errors++; printk(KERN_INFO"<RX FIFO error>\n"); } if (rx.desc.status & 0x02) { db->stats.rx_crc_errors++; printk(KERN_INFO"<RX CRC error>\n"); } if (rx.desc.status & 0x80) { db->stats.rx_length_errors++; printk(KERN_INFO"<RX Length error>\n"); } if (rx.desc.status & 0x08) printk(KERN_INFO"<Physical Layer error>\n"); } if (!GoodPacket) { // drop this packet!!!
switch (db->io_mode) { case DM9KS_BYTE_MODE: for (i=0; i<rx.desc.length; i++) inb(db->io_data); break; case DM9KS_WORD_MODE: tmplen = (rx.desc.length + 1) / 2; for (i = 0; i < tmplen; i++) inw(db->io_data); break; case DM9KS_DWORD_MODE: tmplen = (rx.desc.length + 3) / 4; for (i = 0; i < tmplen; i++) inl(db->io_data); break; } continue; /* next packet */ } skb = dev_alloc_skb(rx.desc.length+4); if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze.\n", dev->name); /* re-load the value into the Memory Data Read address register */ iow(db,DM9KS_MDRAH,MDRAH); iow(db,DM9KS_MDRAL,MDRAL); return; } else { /* Move data from DM9000 */ skb->dev = dev; skb_reserve(skb, 2); rdptr = (u8*)skb_put(skb, rx.desc.length - 4); /* Read the received packet from RX SRAM */ switch (db->io_mode) { case DM9KS_BYTE_MODE: for (i=0; i<rx.desc.length; i++) rdptr[i]=inb(db->io_data); break; case DM9KS_WORD_MODE: tmplen = (rx.desc.length + 1) / 2; for (i = 0; i < tmplen; i++) ((u16 *)rdptr)[i] = inw(db->io_data); break; case DM9KS_DWORD_MODE: tmplen = (rx.desc.length + 3) / 4; for (i = 0; i < tmplen; i++) ((u32 *)rdptr)[i] = inl(db->io_data); break; } /* Pass to upper layer */ skb->protocol = eth_type_trans(skb,dev); #ifdef CHECKSUM if((rxbyte&0xe0)==0) /* received packet's checksum did not fail */ skb->ip_summed = CHECKSUM_UNNECESSARY; #endif netif_rx(skb); dev->last_rx=jiffies; db->stats.rx_packets++; db->stats.rx_bytes += rx.desc.length; db->cont_rx_pkt_cnt++; #ifdef RDBG /* check RX FIFO pointer */ u16 MDRAH1, MDRAL1; u16 tmp_ptr; MDRAH1 = ior(db,DM9KS_MDRAH); MDRAL1 = ior(db,DM9KS_MDRAL); tmp_ptr = (MDRAH<<8)|MDRAL; switch (db->io_mode) { case DM9KS_BYTE_MODE: tmp_ptr += rx.desc.length+4; break; case DM9KS_WORD_MODE: tmp_ptr += ((rx.desc.length+1)/2)*2+4; break; case DM9KS_DWORD_MODE: tmp_ptr += ((rx.desc.length+3)/4)*4+4; break; } if (tmp_ptr >=0x4000) tmp_ptr = (tmp_ptr - 0x4000) + 0xc00; if (tmp_ptr != ((MDRAH1<<8)|MDRAL1)) printk("dm9ks: RX FIFO error\n"); #endif if (db->cont_rx_pkt_cnt>=CONT_RX_PKT_CNT) { dmfe_tx_done(0); break; } } }while((rxbyte & 0x01) == DM9KS_PKT_RDY); DMFE_DBUG(0, "[END]dmfe_packet_receive()", 0); }
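/*
 * A sketch of the width-dependent FIFO read used above: a packet of
 * `len` bytes is pulled from a fixed data register with 8-, 16-, or
 * 32-bit accesses, so the word and dword loop counts round up to
 * (len + 1) / 2 and (len + 3) / 4. The port readers are stubs standing
 * in for inb()/inw()/inl(); the destination buffer must be padded to
 * the rounded-up size.
 */
#include <stdint.h>

enum io_mode { BYTE_MODE, WORD_MODE, DWORD_MODE };

static uint8_t  rd8(void)  { return 0; } /* stand-in for inb(data_reg) */
static uint16_t rd16(void) { return 0; } /* stand-in for inw(data_reg) */
static uint32_t rd32(void) { return 0; } /* stand-in for inl(data_reg) */

static void fifo_read(enum io_mode mode, uint8_t *dst, int len)
{
	int i;

	switch (mode) {
	case BYTE_MODE:
		for (i = 0; i < len; i++)
			dst[i] = rd8();
		break;
	case WORD_MODE:
		for (i = 0; i < (len + 1) / 2; i++) /* round up to 16 bits */
			((uint16_t *)dst)[i] = rd16();
		break;
	case DWORD_MODE:
		for (i = 0; i < (len + 3) / 4; i++) /* round up to 32 bits */
			((uint32_t *)dst)[i] = rd32();
		break;
	}
}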
/* packet receiver */ static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) { struct arcnet_local *lp = netdev_priv(dev); struct sk_buff *skb; struct archdr *pkt = pkthdr; struct arc_rfc1201 *soft = &pkthdr->soft.rfc1201; int saddr = pkt->hard.source, ofs; struct Incoming *in = &lp->rfc1201.incoming[saddr]; BUGMSG(D_DURING, "it's an RFC1201 packet (length=%d)\n", length); if (length >= MinTU) ofs = 512 - length; else ofs = 256 - length; if (soft->split_flag == 0xFF) { /* Exception Packet */ if (length >= 4 + RFC1201_HDR_SIZE) BUGMSG(D_DURING, "compensating for exception packet\n"); else { BUGMSG(D_EXTRA, "short RFC1201 exception packet from %02Xh", saddr); return; } /* skip over 4-byte junkola */ length -= 4; ofs += 4; lp->hw.copy_from_card(dev, bufnum, 512 - length, soft, sizeof(pkt->soft)); } if (!soft->split_flag) { /* not split */ BUGMSG(D_RX, "incoming is not split (splitflag=%d)\n", soft->split_flag); if (in->skb) { /* already assembling one! */ BUGMSG(D_EXTRA, "aborting assembly (seq=%d) for unsplit packet (splitflag=%d, seq=%d)\n", in->sequence, soft->split_flag, soft->sequence); lp->rfc1201.aborted_seq = soft->sequence; dev_kfree_skb_irq(in->skb); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; in->skb = NULL; } in->sequence = soft->sequence; skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE); skb->dev = dev; pkt = (struct archdr *) skb->data; soft = &pkt->soft.rfc1201; /* up to sizeof(pkt->soft) has already been copied from the card */ memcpy(pkt, pkthdr, sizeof(struct archdr)); if (length > sizeof(pkt->soft)) lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft), pkt->soft.raw + sizeof(pkt->soft), length - sizeof(pkt->soft)); /* * ARP packets have problems when sent from some DOS systems: the * source address is always 0! So we take the hardware source addr * (which is impossible to fumble) and insert it ourselves. */ if (soft->proto == ARC_P_ARP) { struct arphdr *arp = (struct arphdr *) soft->payload; /* make sure addresses are the right length */ if (arp->ar_hln == 1 && arp->ar_pln == 4) { uint8_t *cptr = (uint8_t *) arp + sizeof(struct arphdr); if (!*cptr) { /* is saddr = 00? */ BUGMSG(D_EXTRA, "ARP source address was 00h, set to %02Xh.\n", saddr); dev->stats.rx_crc_errors++; *cptr = saddr; } else { BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n", *cptr); } } else { BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n", arp->ar_hln, arp->ar_pln); dev->stats.rx_errors++; dev->stats.rx_crc_errors++; } } BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); skb->protocol = type_trans(skb, dev); netif_rx(skb); } else { /* split packet */ /* * NOTE: MSDOS ARP packet correction should only need to apply to * unsplit packets, since ARP packets are so short. * * My interpretation of the RFC1201 document is that if a packet is * received out of order, the entire assembly process should be * aborted. * * The RFC also mentions "it is possible for successfully received * packets to be retransmitted." As of 0.40 all previously received * packets are allowed, not just the most recent one. * * We allow multiple assembly processes, one for each ARCnet card * possible on the network. Seems rather like a waste of memory, * but there's no other way to be reliable. 
*/ BUGMSG(D_RX, "packet is split (splitflag=%d, seq=%d)\n", soft->split_flag, in->sequence); if (in->skb && in->sequence != soft->sequence) { BUGMSG(D_EXTRA, "wrong seq number (saddr=%d, expected=%d, seq=%d, splitflag=%d)\n", saddr, in->sequence, soft->sequence, soft->split_flag); dev_kfree_skb_irq(in->skb); in->skb = NULL; dev->stats.rx_errors++; dev->stats.rx_missed_errors++; in->lastpacket = in->numpackets = 0; } if (soft->split_flag & 1) { /* first packet in split */ BUGMSG(D_RX, "brand new splitpacket (splitflag=%d)\n", soft->split_flag); if (in->skb) { /* already assembling one! */ BUGMSG(D_EXTRA, "aborting previous (seq=%d) assembly " "(splitflag=%d, seq=%d)\n", in->sequence, soft->split_flag, soft->sequence); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; dev_kfree_skb_irq(in->skb); } in->sequence = soft->sequence; in->numpackets = ((unsigned) soft->split_flag >> 1) + 2; in->lastpacket = 1; if (in->numpackets > 16) { BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n", soft->split_flag); lp->rfc1201.aborted_seq = soft->sequence; dev->stats.rx_errors++; dev->stats.rx_length_errors++; return; } in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n"); lp->rfc1201.aborted_seq = soft->sequence; dev->stats.rx_dropped++; return; } skb->dev = dev; pkt = (struct archdr *) skb->data; soft = &pkt->soft.rfc1201; memcpy(pkt, pkthdr, ARC_HDR_SIZE + RFC1201_HDR_SIZE); skb_put(skb, ARC_HDR_SIZE + RFC1201_HDR_SIZE); soft->split_flag = 0; /* end result won't be split */ } else { /* not first packet */
bool device_receive_frame( PSDevice pDevice, PSRxDesc pCurrRD ) { PDEVICE_RD_INFO pRDInfo = pCurrRD->pRDInfo; struct net_device_stats *pStats = &pDevice->stats; struct sk_buff *skb; PSMgmtObject pMgmt = pDevice->pMgmt; PSRxMgmtPacket pRxPacket = &(pDevice->pMgmt->sRxPacket); PS802_11Header p802_11Header; unsigned char *pbyRsr; unsigned char *pbyNewRsr; unsigned char *pbyRSSI; PQWORD pqwTSFTime; unsigned short *pwFrameSize; unsigned char *pbyFrame; bool bDeFragRx = false; bool bIsWEP = false; unsigned int cbHeaderOffset; unsigned int FrameSize; unsigned short wEtherType = 0; int iSANodeIndex = -1; int iDANodeIndex = -1; unsigned int ii; unsigned int cbIVOffset; bool bExtIV = false; unsigned char *pbyRxSts; unsigned char *pbyRxRate; unsigned char *pbySQ; unsigned int cbHeaderSize; PSKeyItem pKey = NULL; unsigned short wRxTSC15_0 = 0; unsigned long dwRxTSC47_16 = 0; SKeyItem STempKey; // 802.11h RPI unsigned long dwDuration = 0; long ldBm = 0; long ldBmThreshold = 0; PS802_11Header pMACHeader; bool bRxeapol_key = false; // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- device_receive_frame---\n"); skb = pRDInfo->skb; //PLICE_DEBUG-> #if 1 pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma, pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE); #endif //PLICE_DEBUG<- pwFrameSize = (unsigned short *)(skb->data + 2); FrameSize = cpu_to_le16(pCurrRD->m_rd1RD1.wReqCount) - cpu_to_le16(pCurrRD->m_rd0RD0.wResCount); // Max: 2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR // Min (ACK): 10HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR if ((FrameSize > 2364) || (FrameSize <= 32)) { // Frame size error: drop this packet. DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 1 \n"); return false; } pbyRxSts = (unsigned char *)(skb->data); pbyRxRate = (unsigned char *)(skb->data + 1); pbyRsr = (unsigned char *)(skb->data + FrameSize - 1); pbyRSSI = (unsigned char *)(skb->data + FrameSize - 2); pbyNewRsr = (unsigned char *)(skb->data + FrameSize - 3); pbySQ = (unsigned char *)(skb->data + FrameSize - 4); pqwTSFTime = (PQWORD)(skb->data + FrameSize - 12); pbyFrame = (unsigned char *)(skb->data + 4); // get packet size FrameSize = cpu_to_le16(*pwFrameSize); if ((FrameSize > 2346) || (FrameSize < 14)) { // Max: 2312Payload + 30HD +4CRC // Min: 14 bytes ACK DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 2 \n"); return false; } //PLICE_DEBUG-> #if 1 // update receive statistic counter STAvUpdateRDStatCounter(&pDevice->scStatistic, *pbyRsr, *pbyNewRsr, *pbyRxRate, pbyFrame, FrameSize); #endif pMACHeader = (PS802_11Header)((unsigned char *)(skb->data) + 8); //PLICE_DEBUG<- if (pDevice->bMeasureInProgress == true) { if ((*pbyRsr & RSR_CRCOK) != 0) { pDevice->byBasicMap |= 0x01; } dwDuration = (FrameSize << 4); dwDuration /= acbyRxRate[*pbyRxRate%MAX_RATE]; if (*pbyRxRate <= RATE_11M) { if (*pbyRxSts & 0x01) { // long preamble dwDuration += 192; } else { // short preamble dwDuration += 96; } } else { dwDuration += 16; } RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm); ldBmThreshold = -57; for (ii = 7; ii > 0;) { if (ldBm > ldBmThreshold) { break; } ldBmThreshold -= 5; ii--; } pDevice->dwRPIs[ii] += dwDuration; return false; } if (!is_multicast_ether_addr(pbyFrame)) { if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header)(skb->data + 4))) { pDevice->s802_11Counter.FrameDuplicateCount++; return false; } } // Used for the TKIP MIC s_vGetDASA(skb->data+4, &cbHeaderSize, &pDevice->sRxEthHeader); // filter out packets sent from myself if (ether_addr_equal(pDevice->sRxEthHeader.abySrcAddr, pDevice->abyCurrentNetAddr)) return
false; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) { if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) { p802_11Header = (PS802_11Header)(pbyFrame); // get SA NodeIndex if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(p802_11Header->abyAddr2), &iSANodeIndex)) { pMgmt->sNodeDBTable[iSANodeIndex].ulLastRxJiffer = jiffies; pMgmt->sNodeDBTable[iSANodeIndex].uInActiveCount = 0; } } } if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == true) { return false; } } if (IS_FC_WEP(pbyFrame)) { bool bRxDecryOK = false; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx WEP pkt\n"); bIsWEP = true; if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) { pKey = &STempKey; pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite; pKey->dwKeyIndex = pMgmt->sNodeDBTable[iSANodeIndex].dwKeyIndex; pKey->uKeyLength = pMgmt->sNodeDBTable[iSANodeIndex].uWepKeyLength; pKey->dwTSC47_16 = pMgmt->sNodeDBTable[iSANodeIndex].dwTSC47_16; pKey->wTSC15_0 = pMgmt->sNodeDBTable[iSANodeIndex].wTSC15_0; memcpy(pKey->abyKey, &pMgmt->sNodeDBTable[iSANodeIndex].abyWepKey[0], pKey->uKeyLength ); bRxDecryOK = s_bHostWepRxEncryption(pDevice, pbyFrame, FrameSize, pbyRsr, pMgmt->sNodeDBTable[iSANodeIndex].bOnFly, pKey, pbyNewRsr, &bExtIV, &wRxTSC15_0, &dwRxTSC47_16); } else { bRxDecryOK = s_bHandleRxEncryption(pDevice, pbyFrame, FrameSize, pbyRsr, pbyNewRsr, &pKey, &bExtIV, &wRxTSC15_0, &dwRxTSC47_16); } if (bRxDecryOK) { if ((*pbyNewRsr & NEWRSR_DECRYPTOK) == 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ICV Fail\n"); if ((pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) { pDevice->s802_11Counter.TKIPICVErrors++; } else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP)) { pDevice->s802_11Counter.CCMPDecryptErrors++; } else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_WEP)) { // pDevice->s802_11Counter.WEPICVErrorCount.QuadPart++; } } return false; } } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP Func Fail\n"); return false; } if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP)) FrameSize -= 8; // Message Integrity Code else FrameSize -= 4; // 4 is ICV } // // RX OK // //remove the CRC length FrameSize -= ETH_FCS_LEN; if ((!(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI))) && // unicast address (IS_FRAGMENT_PKT((skb->data+4))) ) { // defragment bDeFragRx = WCTLbHandleFragment(pDevice, (PS802_11Header)(skb->data+4), FrameSize, bIsWEP, bExtIV); pDevice->s802_11Counter.ReceivedFragmentCount++; if (bDeFragRx) { // defrag complete skb = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb; FrameSize = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength; } else { return false; } } // Management & Control frame Handle if ((IS_TYPE_DATA((skb->data+4))) == false) { // Handle Control & Manage Frame if (IS_TYPE_MGMT((skb->data+4))) { unsigned char *pbyData1; unsigned char *pbyData2; pRxPacket->p80211Header = (PUWLAN_80211HDR)(skb->data+4); pRxPacket->cbMPDULen = FrameSize; pRxPacket->uRSSI = *pbyRSSI; pRxPacket->bySQ = *pbySQ; HIDWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(HIDWORD(*pqwTSFTime)); LODWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(LODWORD(*pqwTSFTime)); if (bIsWEP) { // strip IV pbyData1 = WLAN_HDR_A3_DATA_PTR(skb->data+4); pbyData2 = 
WLAN_HDR_A3_DATA_PTR(skb->data+4) + 4; for (ii = 0; ii < (FrameSize - 4); ii++) { *pbyData1 = *pbyData2; pbyData1++; pbyData2++; } } pRxPacket->byRxRate = s_byGetRateIdx(*pbyRxRate); pRxPacket->byRxChannel = (*pbyRxSts) >> 2; //PLICE_DEBUG-> //EnQueue(pDevice,pRxPacket); #ifdef THREAD EnQueue(pDevice, pRxPacket); //up(&pDevice->mlme_semaphore); //Enque (pDevice->FirstRecvMngList,pDevice->LastRecvMngList,pMgmt); #else #ifdef TASK_LET EnQueue(pDevice, pRxPacket); tasklet_schedule(&pDevice->RxMngWorkItem); #else vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket); //tasklet_schedule(&pDevice->RxMngWorkItem); #endif #endif //PLICE_DEBUG<- //vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket); // hostap Deamon handle 802.11 management if (pDevice->bEnableHostapd) { skb->dev = pDevice->apdev; skb->data += 4; skb->tail += 4; skb_put(skb, FrameSize); skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); return true; } } else { // Control Frame };
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context; struct eth_dev *dev = ep->driver_data; int status = req->status; switch (status) { /* normal completion */ case 0: skb_put(skb, req->actual); if (dev->unwrap) status = dev->unwrap(skb); if (status < 0 || ETH_HLEN > skb->len || skb->len > ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb->len); break; } skb->protocol = eth_type_trans(skb, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb->len; /* no buffer copies needed, unless hardware can't * use skb buffers. */ status = netif_rx(skb); skb = NULL; break; /* software-driven interface shutdown */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; /* for hardware automagic (such as pxa) */ case -ECONNABORTED: /* endpoint reset */ DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: dev_kfree_skb_any(skb); goto clean; /* data overrun */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; /* FALLTHROUGH */ default: dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } if (skb) dev_kfree_skb_any(skb); if (!netif_running(dev->net)) { clean: spin_lock(&dev->req_rx_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_rx_lock); req = NULL; } if (req) rx_submit(dev, req, GFP_ATOMIC); }
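/*
 * A hedged sketch of the completion-status policy in rx_complete()
 * above: a finished request is delivered, dropped and resubmitted, or
 * parked on the free list while the interface is quiescing. The enum
 * and helper are invented for illustration; only the errno values
 * mirror the cases handled above.
 */
#include <errno.h>
#include <stdbool.h>

enum rx_action {
	RX_DELIVER,        /* hand the skb to the network stack */
	RX_DROP_RESUBMIT,  /* free the skb, reuse the request immediately */
	RX_PARK,           /* free the skb, park the request on the free list */
};

static enum rx_action classify_rx_status(int status, bool netif_up)
{
	if (!netif_up)
		return RX_PARK; /* interface down: always park the request */
	switch (status) {
	case 0:
		return RX_DELIVER;      /* normal completion */
	case -ECONNRESET:               /* unlink */
	case -ESHUTDOWN:                /* disconnect */
	case -ECONNABORTED:             /* endpoint reset: refill deferred */
		return RX_PARK;         /* stop feeding the endpoint */
	case -EOVERFLOW:                /* data overrun, counted as rx error */
	default:
		return RX_DROP_RESUBMIT;
	}
}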
static int sendup_buffer (struct net_device *dev) { /* on entry, command is in ltdmacbuf, data in ltdmabuf */ /* called from idle, non-reentrant */ int dnode, snode, llaptype, len; int sklen; struct sk_buff *skb; struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf; if (ltc->command != LT_RCVLAP) { printk("unknown command 0x%02x from ltpc card\n", ltc->command); return -1; } dnode = ltc->dnode; snode = ltc->snode; llaptype = ltc->laptype; len = ltc->length; sklen = len; if (llaptype == 1) sklen += 8; /* correct for short ddp */ if (sklen > 800) { printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n", dev->name, sklen); return -1; } if (llaptype == 0 || llaptype > 2) { printk(KERN_INFO "%s: unknown LLAP type: %d\n", dev->name, llaptype); return -1; } skb = dev_alloc_skb(3 + sklen); if (skb == NULL) { printk("%s: dropping packet due to memory squeeze.\n", dev->name); return -1; } skb->dev = dev; if (sklen > len) skb_reserve(skb, 8); skb_put(skb, len + 3); skb->protocol = htons(ETH_P_LOCALTALK); /* add LLAP header */ skb->data[0] = dnode; skb->data[1] = snode; skb->data[2] = llaptype; skb_reset_mac_header(skb); /* save pointer to llap header */ skb_pull(skb, 3); /* copy ddp(s,e)hdr + contents */ skb_copy_to_linear_data(skb, ltdmabuf, len); skb_reset_transport_header(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; /* toss it onwards */ netif_rx(skb); return 0; }
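/*
 * A minimal sketch of the LLAP framing done above: the three header
 * bytes (destination node, source node, LLAP type) are written in front
 * of the DDP payload. The helper below is illustrative only; the driver
 * builds the same layout directly inside the skb.
 */
#include <string.h>
#include <stdint.h>

/* `out` must have room for 3 + len bytes; returns the frame length */
static int llap_frame(uint8_t *out, uint8_t dnode, uint8_t snode,
		      uint8_t laptype, const uint8_t *ddp, int len)
{
	out[0] = dnode;   /* destination node */
	out[1] = snode;   /* source node */
	out[2] = laptype; /* 1 = short DDP, 2 = long DDP */
	memcpy(out + 3, ddp, len);
	return 3 + len;
}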
/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, u16 vlan_tci, int polling) { __vlan_hwaccel_put_tag(skb, vlan_tci); return polling ? netif_receive_skb(skb) : netif_rx(skb); }
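/*
 * A hedged usage sketch: a hypothetical driver RX path handing up a
 * frame whose VLAN tag the hardware already stripped. From a NAPI poll
 * routine `polling` is 1 (netif_receive_skb); from an interrupt path it
 * would be 0 (netif_rx). `struct my_adapter` and `my_poll_rx` are
 * invented for the example.
 */
struct my_adapter {
	struct net_device *netdev;
	struct vlan_group *vlgrp;
};

static void my_poll_rx(struct my_adapter *ap, struct sk_buff *skb, u16 tci)
{
	skb->protocol = eth_type_trans(skb, ap->netdev);
	if (ap->vlgrp) /* hardware reported a stripped VLAN tag */
		__vlan_hwaccel_rx(skb, ap->vlgrp, tci, 1 /* NAPI poll context */);
	else
		netif_receive_skb(skb);
}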