/*
 * raw_rcv - deliver one skb to a raw IPv4 socket.
 *
 * Runs the inbound xfrm policy check for @sk first; packets that fail it
 * are freed and reported as NET_RX_DROP.  Accepted packets get their
 * netfilter and net-session state cleared, the network header is pushed
 * back into the linear data area, and the skb is queued on the socket.
 *
 * Returns 0 on delivery, NET_RX_DROP if policy rejected the packet.
 */
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		nf_reset(skb);
		ns_reset(skb);
		/* Re-expose the IP header: raw sockets see the full datagram. */
		skb_push(skb, skb->data - skb->nh.raw);
		raw_rcv_skb(sk, skb);
		return 0;
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * ip_local_deliver_finish2 - hand an skb to the L4 handler for @protocol.
 *
 * Variant of ip_local_deliver_finish() that takes the protocol number as
 * an argument instead of reading it from the IP header (see the retained
 * commented-out line below).  Dispatch runs under rcu_read_lock() because
 * inet_protos[] is RCU-protected.
 *
 * A handler may return a negative value to request resubmission as a
 * different protocol (e.g. after IPsec decapsulation); we then loop via
 * the resubmit label.  Returns 0 in all cases; the skb is always consumed.
 */
static inline int ip_local_deliver_finish2(struct sk_buff *skb, u_int8_t protocol)
{
	/* VRF context resolved from the input device, for per-VRF stats. */
	struct vrf *vrf = if_dev_vrf(skb->dev);

	rcu_read_lock();
	{
		/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
		//int protocol = skb->nh.iph->protocol;
		int hash, raw;
		struct net_protocol *ipprot;

	resubmit:
		/* Give raw sockets a copy first; remember whether any matched. */
		raw = raw_local_deliver(skb, protocol);

		hash = protocol & (MAX_INET_PROTOS - 1);
		if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
			int ret;

			/* Protocols without their own policy handling get the
			 * xfrm policy check (and state reset) done here. */
			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
				ns_reset(skb);
			}
			ret = ipprot->handler(skb);
			if (ret < 0) {
				/* Negative return encodes the next protocol
				 * number to retry dispatch with. */
				protocol = -ret;
				goto resubmit;
			}
			IP_INC_STATS_BH(vrf, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				/* Nobody wants it: count it and send ICMP
				 * protocol-unreachable if policy permits. */
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					IP_INC_STATS_BH(vrf, IPSTATS_MIB_INUNKNOWNPROTOS);
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
				}
			} else
				IP_INC_STATS_BH(vrf, IPSTATS_MIB_INDELIVERS);
			kfree_skb(skb);
		}
	}
 out:
	rcu_read_unlock();
	return 0;
}
/*
 * ip6ip6_rcv - receive side of the IPv6-in-IPv6 tunnel.
 *
 * Looks up a configured tunnel matching the outer header's addresses.
 * If one is found the outer header is stripped (decapsulation), per-tunnel
 * stats are updated and the inner packet is re-injected via netif_rx();
 * returns 0 (packet consumed, possibly dropped).  If no tunnel matches,
 * returns 1 so the caller can fall through to other handlers.
 *
 * ip6ip6_lock is held (read) across the lookup and all uses of @t.
 */
static int ip6ip6_rcv(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t;

	ipv6h = skb->nh.ipv6h;

	read_lock(&ip6ip6_lock);

	if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
		/* Policy check on the outer packet before decapsulating. */
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			read_unlock(&ip6ip6_lock);
			goto discard;
		}

		/* Tunnel-level receive control (admin state etc.). */
		if (!ip6_tnl_rcv_ctl(t)) {
			t->stat.rx_dropped++;
			read_unlock(&ip6ip6_lock);
			goto discard;
		}
		secpath_reset(skb);
		/* Decapsulate: old network header becomes the MAC header,
		 * the inner IPv6 header (at skb->data) the new one. */
		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = skb->data;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		skb->dev = t->dev;
		/* Drop routing/netfilter/session state from the outer packet. */
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ns_reset(skb);
		if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
			/* Propagate outer DSCP onto the inner header. */
			ipv6_copy_dscp(ipv6h, skb->nh.ipv6h);
		ip6ip6_ecn_decapsulate(ipv6h, skb);
		t->stat.rx_packets++;
		t->stat.rx_bytes += skb->len;
		netif_rx(skb);
		read_unlock(&ip6ip6_lock);
		return 0;
	}
	read_unlock(&ip6ip6_lock);
	/* Not ours: let the caller try other protocol handlers. */
	return 1;

discard:
	kfree_skb(skb);
	return 0;
}
/*
 * mpls_route - re-inject a de-labelled packet into the regular IP stack.
 *
 * Called after MPLS processing (explicit-NULL pop, 6PE, 6VPE, VRF) has
 * decided the packet should be forwarded by Linux.  Scrubs per-packet
 * state left over from the MPLS path, rewrites the link-level metadata
 * for protocol @proto and feeds the skb to netif_receive_skb().
 *
 * Returns whatever netif_receive_skb() returns.
 */
s32 mpls_route(struct sk_buff * skb, u16 proto)
{
	/*Here process EXPLICIT-NULL pop ,6PE, 6VPE, VRF*/

	/* The packet changes protocol family: point the network header at
	 * the current data and stamp the new ethertype. */
	skb->nh.raw = skb->data;
	skb->vid = 0;
	skb->protocol = htons(proto) ;

	/* Clear stale per-packet state from the MPLS pass. */
	nf_reset(skb);
	ns_reset(skb);
	dst_release(skb->dst);
	skb->dst = NULL ;

	/* Mark the skb for the Linux slow-path forwarding fast-flow logic. */
	skb->ff_flag = ff_set_flag(skb, DRV_FF_FLAG_LINUX_FORWARD);

	MPLS_DEBUG_COUNTER_INC(mpls_route);
	MPLS_DEBUG_FORWARD("mpls_route: skb->iif %x, skb->dev %s, proto %d.\n" , skb->iif, skb->dev ? skb->dev->name : "Unknown", proto);
	MPLS_DEBUG_SKB(skb, "mpls_route: \n");

	return netif_receive_skb(skb);
}
/*
 * ip_local_deliver_finish - final local-delivery step for IPv4 packets.
 *
 * Pulls the IP header, then:
 *   1. short-circuits IPsec data packets to ipsec_receive_skb();
 *   2. for confirmed conntrack entries, installs a fast-forward entry
 *      (flagged differently for IPsec UDP-encapsulated floats);
 *   3. diverts IPsec control-connection packets to userspace;
 *   4. otherwise dispatches to the L4 protocol handler under RCU, with
 *      raw-socket delivery and resubmit support, mirroring
 *      ip_local_deliver_finish2().
 *
 * Always returns 0; the skb is consumed on every path.
 */
int ip_local_deliver_finish(struct sk_buff *skb)
{
	int ihl = skb->nh.iph->ihl*4;
	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
	struct vrf *vrf = if_dev_vrf(skb->dev);

	/* Strip the IP header; transport header starts at skb->data now. */
	__skb_pull(skb, ihl);

	skb->h.raw = skb->data;

	if(ipsec_data_packet(skb))
	{
		return ipsec_receive_skb(skb);
	}

	/* For packets delivered to the local host that went through flow
	 * classification, install a fast-forward entry.  IPsec data packets
	 * are excluded here; their fast-forward entry is installed later. */
	if (ct && test_bit(IPS_CONFIRMED_BIT, &ct->status))
	{
		if (0 == smp_processor_id ())
		{
			NF_GET_CPU(ct) = 0;
		}
		if(ipsec_udp_float_packet(skb))
		{
			/* NAT-T/UDP-encapsulated IPsec: steer to the
			 * dataplane crypto path instead of plain Linux. */
			skb->ff_flag = ff_set_flag(skb, DRV_FF_FLAG_IPSEC_DPCRYPT);
			skb->ff_flag = ff_clr_flag(skb, DRV_FF_FLAG_LINUX);
		}
		else
		{
			skb->ff_flag = ff_set_flag(skb, DRV_FF_FLAG_LINUX);
		}
		if(nf_ct_tcp_loose)
		{
			ff_items_add_for_simple_state(skb);
		}
		else
		{
			ff_items_add_basic_on_session(skb);
		}
	}

	if (ipsec_conn_packet(skb))
	{
#ifdef CONFIG_NETSESSION
		struct net_session *ns = (struct net_session *)(skb->ns);
		if(ns)
		{
			ns_ff_set_flag(skb, NS_FF_IPSEC_DPCRYPT);
		}
#endif
		/* IPsec control traffic goes to the userspace daemon. */
		ipsec_pkt_send2user(skb);
		return 0;
	}

	rcu_read_lock();
	{
		/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
		int protocol = skb->nh.iph->protocol;
		int hash, raw;
		struct net_protocol *ipprot;

	resubmit:
		/* Raw sockets get their copy before protocol dispatch. */
		raw = raw_local_deliver(skb, protocol);

		hash = protocol & (MAX_INET_PROTOS - 1);
		if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
			int ret;

			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
				ns_reset(skb);
			}
			/* Handler runs with BHs disabled on this platform. */
			conplat_bh_disable();
			ret = ipprot->handler(skb);
			conplat_bh_enable();
			if (ret < 0) {
				/* Negative return = resubmit as protocol -ret
				 * (e.g. after decapsulation). */
				protocol = -ret;
				goto resubmit;
			}
			IP_INC_STATS_BH(vrf, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					IP_INC_STATS_BH(vrf, IPSTATS_MIB_INUNKNOWNPROTOS);
					/* ICMP protocol-unreachable is gated by
					 * a global configuration switch. */
					if(g_icmp_status.prot_unreach == IP_OPTION_SUPPORT)
					{
						icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
					}
				}
			} else
				IP_INC_STATS_BH(vrf, IPSTATS_MIB_INDELIVERS);
			kfree_skb(skb);
		}
	}
 out:
	rcu_read_unlock();
	return 0;
}
/*
 * ip6ip6_tnl_xmit - encapsulate and transmit a packet over an IPv6 tunnel.
 *
 * Validates the packet (protocol, tunnel xmit control, address conflict),
 * handles the tunnel-encapsulation-limit option, resolves the route for
 * the outer header, enforces MTU (sending ICMPV6_PKT_TOOBIG back when the
 * packet does not fit), prepends the outer IPv6 header and hands the
 * result to NF_IP6_LOCAL_OUT.
 *
 * t->recursion guards against the tunnel looping into itself.
 * Returns 0 always; the skb is consumed on every path.
 */
static int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->stat;
	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
	int encap_limit = -1;		/* -1 = no encap-limit option added */
	struct ipv6_tel_txoption opt;
	__u16 offset;
	struct flowi fl;
	struct dst_entry *dst;
	struct net_device *tdev;	/* device the outer route leaves by */
	int mtu;
	int max_headroom = sizeof(struct ipv6hdr);
	u8 proto;
	int err;
	int pkt_len;
	int dsfield;

	/* Re-entered from our own output path: refuse to recurse. */
	if (t->recursion++) {
		stats->collisions++;
		goto tx_err;
	}
	if (skb->protocol != htons(ETH_P_IPV6) ||
	    !ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
		goto tx_err;

	/* Honour an existing tunnel-encapsulation-limit option in the
	 * inner packet: 0 means no further encapsulation is allowed. */
	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
			goto tx_err;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl, &t->fl, sizeof (fl));
	proto = fl.proto;

	/* Optionally inherit traffic class / flow label from the inner header. */
	dsfield = ipv6_get_dsfield(ipv6h);
	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);

	/* Use the cached route if still valid, else do a fresh lookup. */
	if ((dst = ip6_tnl_dst_check(t)) != NULL)
		dst_hold(dst);
	else {
		dst = ip6_route_output(if_dev_vrf(dev), if_dev_litevrf_id(dev), NULL, &fl);

		if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0) < 0)
			goto tx_err_link_failure;
	}

	tdev = dst->dev;

	/* Route points back at the tunnel itself: local routing loop. */
	if (tdev == dev) {
		stats->collisions++;
		if (net_ratelimit())
			printk(KERN_WARNING
			       "%s: Local routing loop detected!\n",
			       t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - sizeof (*ipv6h);
	if (encap_limit >= 0) {
		/* Destination-options header with the encap-limit TLV costs
		 * 8 bytes of both headroom and path MTU. */
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);
	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		goto tx_err_dst_release;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	/* Need a private skb with enough headroom for the outer headers. */
	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb;

		if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		kfree_skb(skb);
		skb = new_skb;
	}
	dst_release(skb->dst);
	skb->dst = dst_clone(dst);

	/* Inner IPv6 header becomes the transport header of the outer packet. */
	skb->h.raw = skb->nh.raw;

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}
	/* Build the outer IPv6 header. */
	skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
	ipv6h = skb->nh.ipv6h;
	*(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);	/* version 6 */
	dsfield = INET_ECN_encapsulate(0, dsfield);
	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
	ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6_addr_copy(&ipv6h->saddr, &fl.fl6_src);
	ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
	nf_reset(skb);
	ns_reset(skb);
	pkt_len = skb->len;
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (net_xmit_eval(err) == 0) {
		stats->tx_bytes += pkt_len;
		stats->tx_packets++;
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}
	/* Cache the (still-held) route for the next transmit. */
	ip6_tnl_dst_store(t, dst);
	t->recursion--;
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	t->recursion--;
	return 0;
}
/*
** Name:	void ns_interrupt(dpeth_t * dep)
** Function:	Handles interrupt.
**
** Reads and acknowledges the DP8390 interrupt status register in a loop
** until it is clear, servicing transmit completions/errors, received
** packets, error counters, overwrite warnings and chip-reset events.
** After the loop, if the chip stopped while all pending reads were
** delivered, the controller is reset and DEF_STOPPED is cleared.
*/
static void ns_interrupt(dpeth_t * dep)
{
	int isr, tsr;
	int queue;

	while ((isr = inb_reg0(dep, DP_ISR)) != 0) {
		/* Acknowledge exactly the bits we just read. */
		outb_reg0(dep, DP_ISR, isr);
		if (isr & (ISR_PTX | ISR_TXE)) {
			/* Transmit done (cleanly or with error): inspect TSR. */
			tsr = inb_reg0(dep, DP_TSR);
			if (tsr & TSR_PTX) {
				dep->de_stat.ets_packetT++;
			}
			if (tsr & TSR_COL) dep->de_stat.ets_collision++;
			if (tsr & (TSR_ABT | TSR_FU)) {
				dep->de_stat.ets_fifoUnder++;
			}
			if ((isr & ISR_TXE) || (tsr & (TSR_CRS | TSR_CDH | TSR_OWC))) {
				printf("%s: got send Error (0x%02X)\n", dep->de_name, tsr);
				dep->de_stat.ets_sendErr++;
			}
			queue = dep->de_sendq_tail;

			if (!(dep->de_sendq[queue].sq_filled)) {
				/* Hardware bug? */
				printf("%s: transmit interrupt, but not sending\n", dep->de_name);
				continue;
			}
			/* Retire the finished slot and advance the tail. */
			dep->de_sendq[queue].sq_filled = FALSE;
			if (++queue == dep->de_sendq_nr)
				queue = 0;
			dep->de_sendq_tail = queue;
			/* Kick the next queued frame, if any. */
			if (dep->de_sendq[queue].sq_filled) {
				ns_start_xmit(dep, dep->de_sendq[queue].sq_size, dep->de_sendq[queue].sq_sendpage);
			}
			if (dep->de_flags & DEF_SENDING) {
				ns_send(dep, TRUE, dep->de_send_s);
			}
		}
		if (isr & ISR_PRX) {
			ns_recv(dep, TRUE, 0);
		}
		if (isr & ISR_RXE) {
			printf("%s: got recv Error (0x%04X)\n", dep->de_name, inb_reg0(dep, DP_RSR));
			dep->de_stat.ets_recvErr++;
		}
		if (isr & ISR_CNT) {
			/* Reading the tally counters also clears them. */
			dep->de_stat.ets_CRCerr += inb_reg0(dep, DP_CNTR0);
			dep->de_stat.ets_recvErr += inb_reg0(dep, DP_CNTR1);
			dep->de_stat.ets_fifoOver += inb_reg0(dep, DP_CNTR2);
		}
		if (isr & ISR_OVW) {
			printf("%s: got overwrite warning\n", dep->de_name);
		}
		if (isr & ISR_RDC) {
			/* Nothing to do */
		}
		if (isr & ISR_RST) {
			/* This means we got an interrupt but the ethernet
			 * chip is shutdown. We set the flag DEF_STOPPED, and
			 * continue processing arrived packets. When the
			 * receive buffer is empty, we reset the dp8390.
			 */
			printf("%s: network interface stopped\n", dep->de_name);
			dep->de_flags |= DEF_STOPPED;
			break;
		}
	}
	if ((dep->de_flags & (DEF_READING | DEF_STOPPED)) == (DEF_READING | DEF_STOPPED)) {
		/* The chip is stopped, and all arrived packets delivered */
		ns_reset(dep);
		dep->de_flags &= NOT(DEF_STOPPED);
	}
	return;
}