/*
 * The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct pcpu_lstats *pcpu_lstats, *lb_stats;
	int len;

#ifdef CONFIG_VE
	if (unlikely(get_exec_env()->disable_net)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
#endif
	skb_orphan(skb);

	skb->protocol = eth_type_trans(skb, dev);

	/* it's OK to use per_cpu_ptr() because BHs are off */
	pcpu_lstats = dev->ml_priv;
	lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());

	len = skb->len;
	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
		lb_stats->bytes += len;
		lb_stats->packets++;
	} else
		lb_stats->drops++;

	return NETDEV_TX_OK;
}
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @buf: buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err: error code to be set in message
 * Consumes buffer on failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint imp = msg_importance(msg);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;

	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
	if (msg_isdata(msg))
		msg_set_importance(msg, imp);
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, tipc_own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	skb_orphan(buf);
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	return false;
}
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
static void sc_send_8023(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eh;

	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		return;

	/* drop conntrack reference */
	nf_reset(skb);

	/* detach skb from CAPWAP */
	skb_orphan(skb);

	secpath_reset(skb);

	/* drop any routing info */
	skb_dst_drop(skb);

	skb->dev = dev;
	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);
	if (likely(eth_proto_is_802_3(eh->h_proto)))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_set_network_header(skb, ETH_HLEN);

	/* Force the device to verify it. */
	skb->ip_summed = CHECKSUM_NONE;

	dev_queue_xmit(skb);
}
/*
 * The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *lb_stats;

	skb_orphan(skb);

	skb->protocol = eth_type_trans(skb, dev);
	skb->dev = dev;
#ifndef LOOPBACK_MUST_CHECKSUM
	skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif

	if (skb_shinfo(skb)->tso_size) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));
		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);

		emulate_large_send_offload(skb);
		return 0;
	}

	dev->last_rx = jiffies;

	lb_stats = &per_cpu(loopback_stats, get_cpu());
	lb_stats->rx_bytes += skb->len;
	lb_stats->tx_bytes += skb->len;
	lb_stats->rx_packets++;
	lb_stats->tx_packets++;
	put_cpu();

	netif_rx(skb);

	return 0;
}
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket has its receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = __skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}
/* The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct pcpu_lstats *lb_stats;
	int len;

	skb_tx_timestamp(skb);
	skb_orphan(skb);

	/* Before queueing this packet to netif_rx(),
	 * make sure dst is refcounted.
	 */
	skb_dst_force(skb);

	skb->protocol = eth_type_trans(skb, dev);

	/* it's OK to use per_cpu_ptr() because BHs are off */
	lb_stats = this_cpu_ptr(dev->lstats);

	len = skb->len;
	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
		u64_stats_update_begin(&lb_stats->syncp);
		lb_stats->bytes += len;
		lb_stats->packets++;
		u64_stats_update_end(&lb_stats->syncp);
	}

	return NETDEV_TX_OK;
}
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}
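kcm_rfree() is not included in this collection; the following is only a minimal sketch of the destructor assigned above, assuming it has to undo the receive-memory accounting charged here. Per the kcm_rcv_ready note in requeue_rx_msgs() elsewhere in these examples, the real destructor also re-arms receive readiness on the kcm socket.

/* Minimal sketch (assumed, not the full implementation): undo the
 * receive-memory charge made in kcm_queue_rcv_skb(). */
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);
}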
/** Handle some skbs on a varp socket (if any).
 *
 * @param fd socket file descriptor
 * @param n maximum number of skbs to handle
 * @return number of skbs handled
 */
static int handle_varp_sock(int fd, int n){
	int ret = 0;
	int err = 0;
	struct sk_buff *skb;
	struct socket *sock = NULL;

	sock = sockfd_lookup(fd, &err);
	if (!sock){
		wprintf("> no sock for fd=%d\n", fd);
		goto exit;
	}
	for ( ; ret < n; ret++){
		if (!sock->sk)
			break;
		skb = skb_dequeue(&sock->sk->sk_receive_queue);
		if (!skb)
			break;
		// Call the skb destructor so it isn't charged to the socket anymore.
		// An skb from a socket receive queue is charged to the socket
		// by skb_set_owner_r() until its destructor is called.
		// If the destructor is not called the socket will run out of
		// receive queue space and be unable to accept incoming skbs.
		// The destructor used is sock_rfree(), see 'include/net/sock.h'.
		// Other destructors: sock_wfree, sk_stream_rfree.
		skb_orphan(skb);
		handle_varp_skb(skb);
	}
	sockfd_put(sock);
exit:
	dprintf("< ret=%d\n", ret);
	return ret;
}
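The comment above describes the accounting problem skb_orphan() solves. For reference, skb_orphan() itself is a small inline in include/linux/skbuff.h; roughly the following, though the exact body varies across kernel versions:

/* Roughly what skb_orphan() does: run the skb's destructor so the
 * buffer is no longer charged to its owning socket (e.g. sock_rfree()
 * uncharges sk_rmem_alloc), then clear the owner. */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}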
static int cmtp_session(void *arg)
{
	struct cmtp_session *session = arg;
	struct sock *sk = session->sock->sk;
	struct sk_buff *skb;
	wait_queue_t wait;

	BT_DBG("session %p", session);

	set_user_nice(current, -15);

	init_waitqueue_entry(&wait, current);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (atomic_read(&session->terminate))
			break;
		if (sk->sk_state != BT_CONNECTED)
			break;

		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
			skb_orphan(skb);
			if (!skb_linearize(skb))
				cmtp_recv_frame(session, skb);
			else
				kfree_skb(skb);
		}

		cmtp_process_transmit(session);

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
{
	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_put;
}
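The destructor assigned here is not shown in this example; a minimal sketch of what it would look like, assuming vxlan_sock_put() only has to release the reference taken by sock_hold() above:

/* Hypothetical sketch of the matching destructor: drop the socket
 * reference taken by sock_hold() in vxlan_set_owner(). */
static void vxlan_sock_put(struct sk_buff *skb)
{
	sock_put(skb->sk);
}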
static __inline__ int ethertap_rx_skb(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
#ifdef CONFIG_ETHERTAP_MC
	struct ethhdr *eth = (struct ethhdr *)(skb->data + 2);
#endif
	int len = skb->len;

	if (len < 16) {
		printk(KERN_DEBUG "%s : rx len = %d\n", dev->name, len);
		kfree_skb(skb);
		return -EINVAL;
	}
	if (NETLINK_CREDS(skb)->uid) {
		printk(KERN_INFO "%s : user %d\n", dev->name,
		       NETLINK_CREDS(skb)->uid);
		kfree_skb(skb);
		return -EPERM;
	}

#ifdef CONFIG_ETHERTAP_MC
	if (!(dev->flags & (IFF_NOARP | IFF_PROMISC))) {
		int drop = 0;

		if (eth->h_dest[0] & 1) {
			if (!(ethertap_mc_hash(eth->h_dest) & lp->groups))
				drop = 1;
		} else if (memcmp(eth->h_dest, dev->dev_addr, 6) != 0)
			drop = 1;

		if (drop) {
			if (ethertap_debug > 3)
				printk(KERN_DEBUG "%s : not for us\n", dev->name);
			kfree_skb(skb);
			return -EINVAL;
		}
	}
#endif

	if (skb_shared(skb)) {
		struct sk_buff *skb2 = skb;

		skb = skb_clone(skb, GFP_KERNEL);	/* Clone the buffer */
		if (skb == NULL) {
			kfree_skb(skb2);
			return -ENOBUFS;
		}
		kfree_skb(skb2);
	} else
		skb_orphan(skb);

	skb_pull(skb, 2);
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	memset(skb->cb, 0, sizeof(skb->cb));
	lp->stats.rx_packets++;
	lp->stats.rx_bytes += len;
	netif_rx(skb);
	dev->last_rx = jiffies;
	return len;
}
static void hci_smd_recv_event(void)
{
	int len = 0;
	int rc = 0;
	struct sk_buff *skb = NULL;
	struct hci_smd_data *hsmd = &hs;

	wake_lock(&hs.wake_lock_rx);

	len = smd_read_avail(hsmd->event_channel);
	if (len > HCI_MAX_FRAME_SIZE) {
		BT_ERR("Frame larger than the allowed size, flushing frame");
		rc = smd_read(hsmd->event_channel, NULL, len);
		goto out_event;
	}

	while (len > 0) {
		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb) {
			BT_ERR("Error in allocating socket buffer");
			smd_read(hsmd->event_channel, NULL, len);
			goto out_event;
		}

		rc = smd_read(hsmd->event_channel, skb_put(skb, len), len);
		if (rc < len) {
			BT_ERR("Error in reading from the event channel");
			goto out_event;
		}

		skb->dev = (void *)hsmd->hdev;
		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

		skb_orphan(skb);

		rc = hci_recv_frame(skb);
		if (rc < 0) {
			BT_ERR("Error in passing the packet to HCI Layer");
			/*
			 * skb is freed in hci_recv_frame(); set it to NULL
			 * to avoid accessing it again
			 */
			skb = NULL;
			goto out_event;
		}

		len = smd_read_avail(hsmd->event_channel);
		/*
		 * Start the timer to monitor whether the Rx queue is
		 * empty for releasing the Rx wake lock
		 */
		BT_DBG("Rx Timer is starting");
		mod_timer(&hsmd->rx_q_timer,
			  jiffies + msecs_to_jiffies(RX_Q_MONITOR));
	}

out_event:
	release_lock();
	if (rc)
		kfree_skb(skb);
}
/* In the event of a remote destination, it's possible that we would have
 * matches against an old socket (particularly a TIME-WAIT socket). This
 * causes havoc down the line (ip_local_out et al. expect regular sockets
 * and invalid memory accesses will happen) so simply drop the association
 * in this case.
 */
static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
{
	/* If dev is set, the packet came from the LOCAL_IN callback and
	 * not from a local TCP socket.
	 */
	if (skb->dev)
		skb_orphan(skb);
}
/*
 * Prepends an ISI header and sends a datagram.
 */
static int pn_send(struct sk_buff *skb, struct net_device *dev,
		   u16 dst, u16 src, u8 res, u8 irq)
{
	struct phonethdr *ph;
	int err;

	if (skb->len + 2 > 0xffff /* Phonet length field limit */ ||
	    skb->len + sizeof(struct phonethdr) > dev->mtu) {
		err = -EMSGSIZE;
		goto drop;
	}

	/* Broadcast sending is not implemented */
	if (pn_addr(dst) == PNADDR_BROADCAST) {
		err = -EOPNOTSUPP;
		goto drop;
	}

	skb_reset_transport_header(skb);
	WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
	skb_push(skb, sizeof(struct phonethdr));
	skb_reset_network_header(skb);
	ph = pn_hdr(skb);
	ph->pn_rdev = pn_dev(dst);
	ph->pn_sdev = pn_dev(src);
	ph->pn_res = res;
	ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
	ph->pn_robj = pn_obj(dst);
	ph->pn_sobj = pn_obj(src);

	skb->protocol = htons(ETH_P_PHONET);
	skb->priority = 0;
	skb->dev = dev;

	if (pn_addr(src) == pn_addr(dst)) {
		skb_reset_mac_header(skb);
		skb->pkt_type = PACKET_LOOPBACK;
		skb_orphan(skb);
		if (irq)
			netif_rx(skb);
		else
			netif_rx_ni(skb);
		err = 0;
	} else {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      NULL, NULL, skb->len);
		if (err < 0) {
			err = -EHOSTUNREACH;
			goto drop;
		}
		err = dev_queue_xmit(skb);
	}

	return err;

drop:
	kfree_skb(skb);
	return err;
}
static inline int sn_send_tx_queue(struct sn_queue *queue,
				   struct sn_device *dev, struct sk_buff *skb)
{
	struct sn_tx_metadata tx_meta;
	int ret = NET_XMIT_DROP;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
	if (queue->tx.opts.tci) {
		skb = vlan_insert_tag(skb, queue->tx.opts.tci);
		if (unlikely(!skb))
			goto skip_send;
	}
#else
	if (queue->tx.opts.tci) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      queue->tx.opts.tci);
		if (unlikely(!skb))
			goto skip_send;
	}

	if (queue->tx.opts.outer_tci) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021AD),
				      queue->tx.opts.outer_tci);
		if (unlikely(!skb))
			goto skip_send;
	}
#endif

	skb_orphan(skb);

	sn_set_tx_metadata(skb, &tx_meta);
	ret = dev->ops->do_tx(queue, skb, &tx_meta);

skip_send:
	switch (ret) {
	case NET_XMIT_CN:
		queue->tx.stats.throttled++;
		/* fall through */
	case NET_XMIT_SUCCESS:
		queue->tx.stats.packets++;
		queue->tx.stats.bytes += skb->len;
		break;
	case NET_XMIT_DROP:
		queue->tx.stats.dropped++;
		break;
	case SN_NET_XMIT_BUFFERED:
		/* should not free skb */
		return NET_XMIT_SUCCESS;
	}

	dev_kfree_skb(skb);
	return ret;
}
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
#ifdef NL_EMULATE_DEV
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		sk->protinfo.af_netlink->handler(sk->protocol, skb);
		return 0;
	} else
#endif
	if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf &&
	    !test_bit(0, &sk->protinfo.af_netlink->state)) {
		skb_orphan(skb);
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->receive_queue, skb);
		sk->data_ready(sk, skb->len);
		return 0;
	}
	return -1;
}
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
	int length, cpu;

	skb_orphan(skb);

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	cpu = smp_processor_id();
	stats = per_cpu_ptr(priv->stats, cpu);
	rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);

	if (!(rcv->flags & IFF_UP))
		goto tx_drop;

	if (skb->len > (rcv->mtu + MTU_PAD))
		goto rx_drop;

	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, rcv);
	if (dev->features & NETIF_F_NO_CSUM)
		skb->ip_summed = rcv_priv->ip_summed;

	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);

	length = skb->len;

	stats->tx_bytes += length;
	stats->tx_packets++;

	rcv_stats->rx_bytes += length;
	rcv_stats->rx_packets++;

	netif_rx(skb);
	return NETDEV_TX_OK;

tx_drop:
	kfree_skb(skb);
	stats->tx_dropped++;
	return NETDEV_TX_OK;

rx_drop:
	kfree_skb(skb);
	rcv_stats->rx_dropped++;
	return NETDEV_TX_OK;
}
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_opt *nlk = nlk_sk(sk);

#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		skb_orphan(skb);
		nlk->handler(sk->sk_protocol, skb);
		return 0;
	} else
#endif
	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_orphan(skb);
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
static void sock_data_ready(struct sock *sk, int len){
	struct sk_buff *skb;

	//read_lock(&sk->sk_callback_lock);
	skb = skb_dequeue(&sk->sk_receive_queue);
	if (skb) {
		skb_orphan(skb);
	}
	//read_unlock(&sk->sk_callback_lock);
	if (skb) {
		handle_varp_skb(skb);
	}
}
int nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
	if (inet_sk(sk)->transparent) {
		skb_orphan(skb);
		skb->sk = sk;
		skb->destructor = nf_tproxy_destructor;
		return 1;
	} else
		nf_tproxy_put_sock(sk);

	return 0;
}
static struct sk_buff *
ns_ct_ipv4_gather_frags(struct sk_buff *skb, int (*okfn)(struct sk_buff *),
			s32 user)
{
	skb_orphan(skb);

	local_bh_disable();
	skb = ip_defrag(skb, user);
	local_bh_enable();

	if (skb)
		ip_send_check(skb->nh.iph);

	return skb;
}
static void hci_smd_recv_data(void)
{
	int len = 0;
	int rc = 0;
	struct sk_buff *skb = NULL;
	struct hci_smd_data *hsmd = &hs;

	wake_lock(&hs.wake_lock_rx);

	len = smd_read_avail(hsmd->data_channel);
	if (len > HCI_MAX_FRAME_SIZE) {
		BT_ERR("Frame larger than the allowed size, flushing frame");
		smd_read(hsmd->data_channel, NULL, len);
		goto out_data;
	}

	if (len <= 0)
		goto out_data;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Error in allocating socket buffer");
		smd_read(hsmd->data_channel, NULL, len);
		goto out_data;
	}

	rc = smd_read(hsmd->data_channel, skb_put(skb, len), len);
	if (rc < len) {
		BT_ERR("Error in reading from the channel");
		goto out_data;
	}

	skb->dev = (void *)hsmd->hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	skb_orphan(skb);

	rc = hci_recv_frame(skb);
	if (rc < 0) {
		BT_ERR("Error in passing the packet to HCI Layer");
		/* skb is freed in hci_recv_frame() */
		skb = NULL;
		goto out_data;
	}

	BT_DBG("Rx Timer is starting");
	mod_timer(&hsmd->rx_q_timer,
		  jiffies + msecs_to_jiffies(RX_Q_MONITOR));

out_data:
	release_lock();
	if (rc)
		kfree_skb(skb);
}
static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(ndev, skb);
}
int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	/* inlined skb_scrub_packet() */
	if (xnet)
		skb_orphan(skb);
	skb->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)
	skb->skb_iif = 0;
#endif
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	skb->rxhash = 0;

	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb. It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= proto;
	iph->tos	= tos;
	iph->daddr	= dst;
	iph->saddr	= src;
	iph->ttl	= ttl;
	tunnel_ip_select_ident(skb,
			       (const struct iphdr *)skb_inner_network_header(skb),
			       &rt_dst(rt));

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
/* consumes sk */
void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
	/* assigning tw sockets complicates things; most
	 * skb->sk->X checks would have to test sk->sk_state first */
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = nf_tproxy_destructor;
}
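nf_tproxy_destructor() is not shown above; a sketch of what it presumably does, mirroring the assignment made in nf_tproxy_assign_sock() (clear the association and release the socket reference):

/* Sketch (assumed, not taken verbatim from the source above):
 * undo the assignment made in nf_tproxy_assign_sock(). */
static void nf_tproxy_destructor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	skb->sk = NULL;
	skb->destructor = NULL;

	if (sk)
		nf_tproxy_put_sock(sk);
}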
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err)
		skb->local_df = 1;

	return err;
}
static netdev_tx_t kni_loopback_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	skb_orphan(skb);

	if (skb->vlan_tci == 1) {
		/* needs to be sent to the fastpath */
		kni_net_tx(skb, dev);
	} else {
		/* it's OK to use per_cpu_ptr() because BHs are off */
		netif_rx(skb);
	}

	return NETDEV_TX_OK;
}
/* Returns new sk_buff, or NULL */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	/* Make the packet an orphan that is not owned by any socket */
	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err)
		ip_send_check(ip_hdr(skb));

	return err;
}
static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_private *np = netdev_priv(dev);

	if (!skb_remove_foreign_references(skb)) {
		np->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	dst_release(skb->dst);
	skb->dst = NULL;

	skb_orphan(skb);

	np->stats.tx_bytes += skb->len;
	np->stats.tx_packets++;

	/* Switch to loopback context. */
	dev = np->loopback_dev;
	np  = netdev_priv(dev);

	np->stats.rx_bytes += skb->len;
	np->stats.rx_packets++;

	if (skb->ip_summed == CHECKSUM_HW) {
		/* Defer checksum calculation. */
		skb->proto_csum_blank = 1;
		/* Must be a local packet: assert its integrity. */
		skb->proto_data_valid = 1;
	}

	skb->ip_summed = skb->proto_data_valid ?
		CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

	skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
	skb->protocol = eth_type_trans(skb, dev);
	skb->dev      = dev;
	dev->last_rx  = jiffies;

	/* Flush netfilter context: rx'ed skbuffs not expected to have any. */
	nf_reset(skb);
	secpath_reset(skb);

	netif_rx(skb);

	return 0;
}