s32 mpls_echo_info_get(void * buf) { s32 err = 0; struct fib_mg_res ret = {0}; mpls_echo_sys_req * req = (mpls_echo_sys_req *)buf ; mpls_echo_sys_res * res = (mpls_echo_sys_res *)buf; err = fib_mg_lookup(req->vrfid, req->addr, req->plen, &ret); if (err) { res->count = 0; return err; } if (!ret.neigh) { ret.neigh = __neigh_arp_lookup(&arp_tbl, &ret.nexthop, NULL, ret.dev, NEIGHBOUR_CREAT); read_lock_bh(&ret.neigh->lock); if (!(ret.neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) { res->saddr = inet_select_addr(ret.dev, ret.nexthop, RT_SCOPE_LINK); arp_send(ARPOP_REQUEST, ETH_P_ARP, ret.nexthop, ret.dev, res->saddr, ret.neigh->ha, ret.dev->dev_addr, NULL); read_unlock_bh(&ret.neigh->lock); return 0; } read_unlock_bh(&ret.neigh->lock); } memcpy((void *)res->dest, (void *)ret.neigh->ha, ETH_ALEN); if (ret.dev) { res->oif = ret.dev->ifindex; res->saddr = inet_select_addr(ret.dev, ret.nexthop, RT_SCOPE_LINK); memcpy((void *)res->source, (void *)ret.dev->dev_addr, ret.dev->addr_len); } res->nexthop = ret.nexthop; res->count = ret.count; if (FIB_MG_TYPE_FTN_BASIC == ret.type) { res->labels[0] = ret.glabel; res->labels[1] = ret.glabel2; } else { res->labels[0] = ret.label; res->labels[1] = ret.glabel; res->labels[1] = ret.glabel2; } return 0; }
/*
 * arp_solicit() - send an ARP request probing @neigh's address.
 * @neigh: neighbour entry being resolved/confirmed
 * @skb: packet that triggered the solicitation (may be NULL)
 *
 * Source-address selection honours the per-device arp_announce policy
 * (IN_DEV_ARP_ANNOUNCE): 0 = announce any local IP, 1 = only an saddr
 * on the target's subnet, 2 = always pick a primary/preferred address.
 * When nothing was chosen, falls back to inet_select_addr().
 */
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
	__be32 saddr = 0;
	u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
	struct net_device *dev = neigh->dev;
	__be32 target = *(__be32 *)neigh->primary_key;
	int probes = atomic_read(&neigh->probes);
	struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}
	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
	default:
	case 0:		/* By default announce any local IP */
		if (skb && inet_addr_type(dev_net(dev),
					  ip_hdr(skb)->saddr) == RTN_LOCAL)
			saddr = ip_hdr(skb)->saddr;
		break;
	case 1:		/* Restrict announcements of saddr in same subnet */
		if (!skb)
			break;
		saddr = ip_hdr(skb)->saddr;
		if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
			/* saddr should be known to target */
			if (inet_addr_onlink(in_dev, target, saddr))
				break;
		}
		saddr = 0;
		break;
	case 2:		/* Avoid secondary IPs, get a primary/preferred one */
		break;
	}
	rcu_read_unlock();

	if (!saddr)
		saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);

	/* First ucast_probes attempts go unicast to a snapshot of the
	 * cached hw address; the next app_probes are delegated to
	 * userspace (ARPD); anything beyond falls through to a broadcast
	 * request (dst_hw stays NULL). */
	probes -= neigh->parms->ucast_probes;
	if (probes < 0) {
		if (!(neigh->nud_state & NUD_VALID))
			pr_debug("trying to ucast probe in NUD_INVALID\n");
		neigh_ha_snapshot(dst_ha, neigh, dev);
		dst_hw = dst_ha;
	} else {
		probes -= neigh->parms->app_probes;
		if (probes < 0) {
#ifdef CONFIG_ARPD
			neigh_app_ns(neigh);
#endif
			return;
		}
	}

	arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
		 dst_hw, dev->dev_addr, NULL);
}
static int bind_to_device(struct socket *sock, char *ifname) { struct net *net; struct net_device *dev; __be32 addr; struct sockaddr_in sin; int err; net = sock_net(sock->sk); dev = __dev_get_by_name(net, ifname); if (!dev){ printk("No such device named %s\n", ifname); return -ENODEV; } addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; sin.sin_port = 0; err = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); if (err < 0){ printk("sock bind err, err=%d\n", err); return err; } return 0; }
/*
 * arp_solicit() - emit an ARP request for @neigh (pre-RCU era variant).
 * @neigh: neighbour entry being resolved
 * @skb: packet that triggered the solicitation (may be NULL)
 *
 * Same arp_announce policy handling as later kernels, but uses the
 * refcounted in_dev_get()/in_dev_put() pair and the single-namespace
 * inet_addr_type() signature.
 */
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
	u32 saddr = 0;
	u8 *dst_ha = NULL;
	struct net_device *dev = neigh->dev;
	u32 target = *(u32 *)neigh->primary_key;
	int probes = atomic_read(&neigh->probes);
	struct in_device *in_dev = in_dev_get(dev);

	if (!in_dev)
		return;

	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
	default:
	case 0:		/* By default announce any local IP */
		if (skb && inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL)
			saddr = skb->nh.iph->saddr;
		break;
	case 1:		/* Restrict announcements of saddr in same subnet */
		if (!skb)
			break;
		saddr = skb->nh.iph->saddr;
		if (inet_addr_type(saddr) == RTN_LOCAL) {
			/* saddr should be known to target */
			if (inet_addr_onlink(in_dev, target, saddr))
				break;
		}
		saddr = 0;
		break;
	case 2:		/* Avoid secondary IPs, get a primary/preferred one */
		break;
	}

	/* in_dev is non-NULL here (checked above); drop the reference
	 * taken by in_dev_get(). The extra guard is redundant but kept
	 * byte-identical to the original. */
	if (in_dev)
		in_dev_put(in_dev);

	if (!saddr)
		saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);

	if ((probes -= neigh->parms->ucast_probes) < 0) {
		if (!(neigh->nud_state&NUD_VALID))
			printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
		/* Unicast probe: point at the cached hw address and hold
		 * the neighbour lock across arp_send() so ha stays stable
		 * — this pointer-then-lock order matches the upstream
		 * code of this era. */
		dst_ha = neigh->ha;
		read_lock_bh(&neigh->lock);
	} else if ((probes -= neigh->parms->app_probes) < 0) {
#ifdef CONFIG_ARPD
		neigh_app_ns(neigh);
#endif
		return;
	}

	arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
		 dst_ha, dev->dev_addr, NULL);
	if (dst_ha)
		read_unlock_bh(&neigh->lock);
}
/*
 * vrf_process_v4_outbound() - transmit an IPv4 skb sent through a VRF
 * device (early L3MDEV-flag variant).
 *
 * Prepares the flow for the VRF's routing rule via vrf_send_v4_prep(),
 * fills in a link-scope source address when the packet has none, and
 * hands the packet to ip_local_out(). Returns a NET_XMIT_* code.
 */
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	/* No source yet: pick a link-scope address of the egress device. */
	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

/* Dispatch an outgoing frame from the VRF device by L3 protocol. */
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
/*
 * nf_nat_masquerade_ipv4() - install SNAT to the egress interface's
 * address for a new connection traversing POST_ROUTING.
 * @skb: packet establishing the connection
 * @hooknum: must be NF_INET_POST_ROUTING
 * @range: protocol-range limits supplied by the MASQUERADE target
 * @out: output device whose address becomes the new source
 *
 * Returns NF_ACCEPT (left alone), NF_DROP when the egress device has
 * no usable address, or the result of nf_nat_setup_info().
 */
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
		       const struct nf_nat_range2 *range,
		       const struct net_device *out)
{
	struct nf_conn *ct;
	struct nf_conn_nat *nat;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_range2 newrange;
	const struct rtable *rt;
	__be32 newsrc, nh;

	WARN_ON(hooknum != NF_INET_POST_ROUTING);

	ct = nf_ct_get(skb, &ctinfo);

	WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			 ctinfo == IP_CT_RELATED_REPLY)));

	/* Source address is 0.0.0.0 - locally generated packet that is
	 * probably not supposed to be masqueraded.
	 */
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
		return NF_ACCEPT;

	/* Choose the address on @out facing this route's next hop. */
	rt = skb_rtable(skb);
	nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
	newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE);
	if (!newsrc) {
		pr_info("%s ate my IP address\n", out->name);
		return NF_DROP;
	}

	/* Remember the egress ifindex so the mapping can be flushed when
	 * the device goes down. */
	nat = nf_ct_nat_ext_add(ct);
	if (nat)
		nat->masq_index = out->ifindex;

	/* Transfer from original range. */
	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = newsrc;
	newrange.max_addr.ip = newsrc;
	newrange.min_proto = range->min_proto;
	newrange.max_proto = range->max_proto;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
/*
 * masquerade_tg() - MASQUERADE xtables target: SNAT a new connection to
 * the address of the output interface at POST_ROUTING.
 *
 * Returns NF_ACCEPT (packet left alone), NF_DROP when the egress device
 * has no address, or the result of nf_nat_setup_info().
 *
 * NOTE(review): nfct_nat(ct) is dereferenced before the NF_CT_ASSERT
 * validating ct — this matches the upstream code of this era.
 */
static unsigned int
masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct nf_conn *ct;
	struct nf_conn_nat *nat;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_range newrange;
	const struct nf_nat_ipv4_multi_range_compat *mr;
	const struct rtable *rt;
	__be32 newsrc, nh;

	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);

	ct = nf_ct_get(skb, &ctinfo);
	nat = nfct_nat(ct);

	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			    ctinfo == IP_CT_RELATED_REPLY));

	/* Source address is 0.0.0.0 - locally generated packet that is
	 * probably not supposed to be masqueraded.
	 */
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
		return NF_ACCEPT;

	mr = par->targinfo;
	/* Address on the egress device facing this route's next hop. */
	rt = skb_rtable(skb);
	nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
	newsrc = inet_select_addr(par->out, nh, RT_SCOPE_UNIVERSE);
	if (!newsrc) {
		pr_info("%s ate my IP address\n", par->out->name);
		return NF_DROP;
	}

	/* Remembered so the mapping can be dropped on device down. */
	nat->masq_index = par->out->ifindex;

	/* Transfer from original range. */
	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = newsrc;
	newrange.max_addr.ip = newsrc;
	newrange.min_proto = mr->range[0].min;
	newrange.max_proto = mr->range[0].max;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
static int bind_to_device(struct socket *sock, char *ifname, unsigned short port) { struct net *net; struct net_device *dev; __be32 addr; struct sockaddr_in sin; int ret; net = sock_net(sock->sk); dev = __dev_get_by_name(net, ifname); if (!dev) { KER_DEBUG(KERN_ALERT "No such device named %s\n", ifname); return -ENODEV; } addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; sin.sin_port = cpu_to_be16(port); ret = sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); if (ret < 0) { KER_DEBUG(KERN_ALERT "sock bind err, err=%d\n", ret); return ret; } return 0; }
static int bind_mcastif_addr(struct socket *sock, char *ifname) { struct net_device *dev; u32 addr; struct sockaddr_in sin; if ((dev = __dev_get_by_name(ifname)) == NULL) return -ENODEV; addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); if (!addr) IP_VS_ERR("You probably need to specify IP address on " "multicast interface.\n"); IP_VS_DBG(7, "binding socket with (%s) %u.%u.%u.%u\n", ifname, NIPQUAD(addr)); /* Now bind the socket with the address of multicast interface */ sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; sin.sin_port = 0; return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); }
s32 mpls_echo_input(struct sk_buff * skb) { s32 err = 0; s32 temp = 0; u32 saddr = 0; struct timeval tv; mpls_echo_hdr * echo; struct ethhdr * eth ; struct fib_mg_res res ; struct iphdr *iph = skb->nh.iph; struct udphdr * udph = skb->h.uh; echo = (mpls_echo_hdr *)(skb->data + sizeof(struct udphdr)); skb_push(skb, iph->ihl * 4); err = fib_mg_lookup(if_dev_vrf(skb->dev)->vrf_id, iph->saddr, 32, &res); if (err) { printk("mpls_echo_input -fib_mg_lookup drop.\n"); goto drop; } temp = iph->daddr; iph->daddr = iph->saddr; if (res.dev) { iph->saddr = inet_select_addr(res.dev, res.nexthop, RT_SCOPE_LINK); } else { iph->saddr = temp; } iph->ttl = 255; iph->check = 0; iph->check = ip_fast_csum(iph, iph->ihl); echo->type = MPLS_ECHO_REPLY; do_gettimeofday(&tv); echo->t_rcvd[0] = htonl(tv.tv_sec+JAN_1970); echo->t_rcvd[1] = NTPFRAC(tv.tv_usec); temp = udph->source; udph->source = udph->dest; udph->dest = temp; if (MPLS_MODE_DO_NOT_REPLY == echo->mode) { printk("mpls_echo_input -not reply mode drop.\n"); goto drop; } if (MPLS_MODE_IPX_UDP_ALERT == echo->mode) { //Push router alert to ip option. 
} skb->protocol = htons(ETH_P_IP); if (FIB_MG_TYPE_FTN_BASIC == res.type) { if (MPLS_IMPLICIT_NULL != MPLS_LABEL(res.glabel)) { skb->protocol = htons(ETH_P_MPLS_UC) ; skb_push_label(skb, 128, res.glabel, 1); } if (2 == res.count) { if (MPLS_IMPLICIT_NULL != MPLS_LABEL(res.glabel2)) { skb->protocol = htons(ETH_P_MPLS_UC) ; skb_push_label(skb, 128, res.glabel2, 0); } } } else { skb->protocol = htons(ETH_P_MPLS_UC) ; skb_push_label(skb, 128, res.label, 1); if (MPLS_IMPLICIT_NULL != MPLS_LABEL(res.glabel)) { skb_push_label(skb, 128, res.glabel, 0); } if (3 == res.count) { if (MPLS_IMPLICIT_NULL != MPLS_LABEL(res.glabel2)) { skb->protocol = htons(ETH_P_MPLS_UC) ; skb_push_label(skb, 128, res.glabel2, 0); } } } if (MPLS_MODE_CTRL_CHANNEL == echo->mode) { skb_push_label(skb, 128, 1, 0); } skb->nh.raw = skb->data; skb->dev = res.dev; if (!res.neigh) { res.neigh = __neigh_arp_lookup(&arp_tbl, &res.nexthop, NULL, res.dev, NEIGHBOUR_CREAT); } read_lock_bh(&res.neigh->lock); if (!(res.neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) { saddr = inet_select_addr(res.dev, res.nexthop, RT_SCOPE_LINK); arp_send(ARPOP_REQUEST, ETH_P_ARP, res.nexthop, res.dev, saddr, res.neigh->ha, res.dev->dev_addr, NULL); read_unlock_bh(&res.neigh->lock); printk("mpls_echo_input -neigh invalid drop.\n"); goto drop; } eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); eth->h_proto = (ETH_P_802_3 != skb->protocol) ? htons(skb->protocol) : htons(skb->len); memcpy(eth->h_source, res.dev->dev_addr, res.dev->addr_len); memcpy(eth->h_dest, res.neigh->ha, res.dev->addr_len); skb->mac.raw = skb->data ; skb->mac_len = ETH_HLEN; read_unlock_bh(&res.neigh->lock); neigh_release(res.neigh); dev_queue_xmit(skb); return 0; drop: kfree_skb(skb); return -1; }
/*
 * arp_solicit() - HTC CnE variant of the ARP solicitor: identical to
 * the stock resolver, except that under CONFIG_HTC_NETWORK_CNE the
 * sender IP is forced to the device's own link-scope address when the
 * policy-selected saddr disagrees with it.
 */
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
	__be32 saddr = 0;
	u8 *dst_ha = NULL;
	struct net_device *dev = neigh->dev;
	__be32 target = *(__be32 *)neigh->primary_key;
	int probes = atomic_read(&neigh->probes);
	struct in_device *in_dev;
#ifdef CONFIG_HTC_NETWORK_CNE
	__be32 dev_addr = 0;

	/* The device's own best address for the target, used below to
	 * override a mismatching sender IP. */
	dev_addr = inet_select_addr(dev, target, RT_SCOPE_LINK);
#endif

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}
	/* arp_announce policy: 0 = any local IP, 1 = same-subnet only,
	 * 2 = always use a primary/preferred address. */
	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
	default:
	case 0:
		if (skb && inet_addr_type(dev_net(dev),
					  ip_hdr(skb)->saddr) == RTN_LOCAL)
			saddr = ip_hdr(skb)->saddr;
		break;
	case 1:
		if (!skb)
			break;
		saddr = ip_hdr(skb)->saddr;
		if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
			/* saddr must be on the target's subnet */
			if (inet_addr_onlink(in_dev, target, saddr))
				break;
		}
		saddr = 0;
		break;
	case 2:
		break;
	}
	rcu_read_unlock();

	if (!saddr)
		saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);

	probes -= neigh->parms->ucast_probes;
	if (probes < 0) {
		if (!(neigh->nud_state & NUD_VALID))
			printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
		/* Unicast probe: hold the neighbour lock across arp_send()
		 * so the cached hw address stays stable. */
		dst_ha = neigh->ha;
		read_lock_bh(&neigh->lock);
	} else {
		probes -= neigh->parms->app_probes;
		if (probes < 0) {
#ifdef CONFIG_ARPD
			neigh_app_ns(neigh);
#endif
			return;
		}
	}

#ifdef CONFIG_HTC_NETWORK_CNE
	/* CnE override: never announce a saddr the device doesn't own. */
	if (dev_addr != saddr) {
		printk(KERN_DEBUG "CnE detects wrong sender IP in ARP\n");
		saddr = dev_addr;
	}
#endif

	arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
		 dst_ha, dev->dev_addr, NULL);

	if (dst_ha)
		read_unlock_bh(&neigh->lock);
}
/*
 * conenat_tg() - CONE NAT target: masquerade to the egress device's
 * address and, for UDP, try to reuse (or allocate) a stable external
 * port so the mapping behaves as full-cone NAT. Expectations keyed on
 * the internal endpoint are consulted to find an existing mapping
 * before a free port is searched in the configured range.
 *
 * NOTE(review): nfct_nat(ct) is dereferenced before the NF_CT_ASSERT
 * validating ct, matching the upstream MASQUERADE code of this era.
 */
static unsigned int
conenat_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
	struct net *net;
	struct nf_conn *ct;
	struct nf_conn_nat *nat;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_range newrange;
	const struct nf_nat_multi_range_compat *mr;
	struct rtable *rt;
	__be32 newsrc;

	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);

	ct = nf_ct_get(skb, &ctinfo);
	nat = nfct_nat(ct);
	net = nf_ct_net(ct);

	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			    ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));

	/* Source address is 0.0.0.0 - locally generated packet that is
	 * probably not supposed to be masqueraded.
	 */
	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
		return NF_ACCEPT;

	mr = par->targinfo;
	rt = skb->rtable;
	/* New source = address on the egress device facing the gateway. */
	newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
	if (!newsrc) {
		printk("CONENAT: %s ate my IP address\n", par->out->name);
		return NF_DROP;
	}

	/* Record the egress ifindex so mappings can be torn down when the
	 * device disappears. */
	write_lock_bh(&conenat_lock);
	nat->masq_index = par->out->ifindex;
	write_unlock_bh(&conenat_lock);

	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == IPPROTO_UDP) {
		unsigned int ret, expectcount = net->ct.expect_count;
		u_int16_t minport, maxport;
		u_int16_t newport, tmpport;
		struct nf_conntrack_expect *exp = NULL;
		struct nf_conntrack_tuple tuple;
		struct nf_conn_help *help = nfct_help(ct);

		/* Choose port */
		spin_lock_bh(&nf_conntrack_lock);
#if 0
		exp = LIST_FIND(&nf_conntrack_expect_list,
				exp_src_cmp,
				struct nf_conntrack_expect *,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif
		memset(&tuple, 0, sizeof(tuple));

		/* src: the reply direction's source (external peer side) */
		tuple.src.l3num = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num;
		tuple.src.u3.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
		tuple.src.u.udp.port = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.udp.port;

		/* dst: the masqueraded address; port scanned below.
		 * NOTE(review): src.u.udp.port is already big-endian, so
		 * this htons() double-converts — verify against
		 * __nf_ct_expect_find_bysave()'s expectations. */
		tuple.dst.u3.ip = newsrc;
		//tuple.dst.u.udp.port = htons(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port);
		newport = htons(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port);
		tuple.dst.protonum = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;

		pr_debug("tupple1 = %pI4:%hu\n",
			 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip,
			 ntohs(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port));

		if (expectcount > 0) {
			/* Look for an expectation saved against this internal
			 * endpoint: reuse its external port (cone behaviour). */
			for (tmpport = 0;
			     (tmpport <= expectcount) && (newport <= 65535);
			     tmpport++, newport++) {
				tuple.dst.u.udp.port = newport;
				exp = __nf_ct_expect_find_bysave(net, &tuple,
						&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
				if (exp)
					break;
			}
		}

		if (exp) {
			minport = maxport = exp->tuple.dst.u.udp.port;
			pr_debug("existing mapped port = %hu\n", ntohs(minport));
		} else {
			/* No existing mapping: search the configured range
			 * (or the original source port up to 65535) for a
			 * port with no conflicting expectation. */
			minport = mr->range[0].min.udp.port == 0 ?
				ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port :
				mr->range[0].min.udp.port;
			maxport = mr->range[0].max.udp.port == 0 ?
				htons(65535) :
				mr->range[0].max.udp.port;
			for (newport = ntohs(minport), tmpport = ntohs(maxport);
			     newport <= tmpport; newport++) {
#if 0
				exp = LIST_FIND(&ip_conntrack_expect_list,
						exp_cmp,
						struct nf_conntrack_expect *,
						newsrc, htons(newport),
						ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
#endif
				/* dst */
				tuple.dst.u.udp.port = htons(newport);
				exp = __nf_ct_expect_find(net, &tuple);
				if (!exp) {
					pr_debug("new mapping: %pI4:%hu -> %pI4:%hu\n",
						 &(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip),
						 ntohs(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.udp.port),
						 &newsrc, newport);
					minport = maxport = htons(newport);
					break;
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);

		newrange.flags = mr->range[0].flags | IP_NAT_RANGE_MAP_IPS |
				 IP_NAT_RANGE_PROTO_SPECIFIED;
		newrange.min_ip = newrange.max_ip = newsrc;
		newrange.min.udp.port = minport;
		newrange.max.udp.port = maxport;

		/* Set ct helper */
		ret = nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC);
		if (ret == NF_ACCEPT) {
			rcu_read_lock();
			if (help == NULL) {
				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
				if (help == NULL) {
					/* NOTE(review): returns with
					 * rcu_read_lock() still held —
					 * missing rcu_read_unlock(). */
					return NF_ACCEPT;
				}
			} else {
				memset(&help->help, 0, sizeof(help->help));
			}
			rcu_assign_pointer(help->helper,
					   &nf_conntrack_helper_cone_nat);
			rcu_read_unlock();
			pr_debug("helper setup, skb=%p\n", skb);
		}
		return ret;
	}
	/* NOTE(review): as captured here the function ends without a
	 * non-UDP return path or a closing brace for conenat_tg() — the
	 * tail of this definition appears truncated in this copy. */
/*
 * mpls_vpws_xmit() - encapsulate and transmit a frame over a VPWS
 * (virtual private wire service) pseudowire.
 * @skb: frame to send
 * @vpws: pseudowire table entry (type, labels, nexthops)
 *
 * CCC local cross-connect forwards the frame unlabelled on the peer
 * port; all other types push the PW label (plus an optional tunnel
 * label and control word), resolve the nexthop neighbour and transmit.
 *
 * Returns MPLS_L2VPN_SUCCESS, or MPLS_L2VPN_DROP after freeing @skb.
 */
s32 mpls_vpws_xmit(struct sk_buff *skb, struct mpls_vpws_table *vpws)
{
	u32 glabel = 0;
	u32 address = 0;
	u32 oifindex = 0;
	u32 saddr = 0;
	u32 nh;
	struct ethhdr *eth;
	struct net_device *dev;
	struct neighbour *neigh;
	struct l2vpn_nexthop *nexthop;
	dev_vlan_info *vlan = (dev_vlan_info *)skb->dev->vlan_info;

	if (MPLS_VPWS_CCC_L == vpws->type) {
		/* Local cross-connect: forward as-is on the peer port,
		 * no label encapsulation. */
		nexthop = vpws->nexthops[0];
		if (unlikely(!nexthop))
			goto nexthop_invalid;
		skb->dev = if_dev_get_by_index(nexthop->oifindex);
		if (unlikely(!skb->dev))
			goto oifindex_invalid;
		skb->mac.raw = skb->data;
		skb->mac_len = ETH_HLEN;
		/* Add vlan tag to packet.
		 * NOTE(review): vlan (skb->dev->vlan_info) is dereferenced
		 * without a NULL check — confirm every port reaching this
		 * path carries vlan_info. */
		if ((DEV_IF_VLAN_TYPE_TRUNK == vlan->type) && (vlan->pvid != skb->vid)) {
			if (!(skb = vlan_add_tag(skb, (skb->vid | skb->vpri << 13))))
				goto add_vlan_tag_failed;
		}
		dev_queue_xmit(skb);
		return MPLS_L2VPN_SUCCESS;
	} else {
		/* Get nexthop info... */
		if (MPLS_VPWS_CCC_R == vpws->type) {
			/* Ctrl word support */
			if (vpws->ctrl_word) {
				skb_push_label(skb, 0, 0, 0);
			}
			nexthop = vpws->nexthops[0];
			if (unlikely(!nexthop))
				goto nexthop_invalid;
			glabel = MPLS_BAD_LABEL;	/* no tunnel label */
			address = nexthop->nexthop;
			oifindex = nexthop->oifindex;
		} else if (MPLS_VPWS_KOMPELLA == vpws->type) {
			/* Not support now */
		} else {
			/* Svc, Martini support ecmp */
			/* Ctrl word support */
			if (vpws->ctrl_word) {
				skb_push_label(skb, 0, 0, 0);
			}
			nh = (vpws->nexthop_count > 1) ?
				mpls_vpws_select(skb, vpws->nexthop_count) : 0;
			nexthop = vpws->nexthops[nh];
			if (unlikely(!nexthop))
				goto nexthop_invalid;
			glabel = nexthop->tx_glabel;
			address = nexthop->nexthop;
			oifindex = nexthop->oifindex;
		}

		/* Inner PW label first, then the outer tunnel label. */
		skb_push_label(skb, 128, vpws->tx_label, 1);
		if (MPLS_BAD_LABEL != glabel && MPLS_IMPLICIT_NULL != glabel) {
			skb_push_label(skb, 128, glabel, 0);
		}
		/* NOTE(review): other tx paths in this file assign
		 * htons(ETH_P_MPLS_UC); here the host-order constant is
		 * stored and htons() is applied only when building
		 * h_proto below — verify which convention this path's
		 * consumers expect for skb->protocol. */
		skb->protocol = ETH_P_MPLS_UC;
		dev = if_dev_get_by_index(oifindex);
		if (unlikely(!dev))
			goto oifindex_invalid;
		skb->dev = dev;
		skb->nh.raw = skb->data;

		neigh = __neigh_arp_lookup(&arp_tbl, &address, NULL, dev,
					   NEIGHBOUR_CREAT);
		if (unlikely(!neigh))
			goto create_neigh_failed;

		/* Neighour is invalid, Send arp resquest */
		read_lock_bh(&neigh->lock);
		if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
			saddr = inet_select_addr(dev, address, RT_SCOPE_LINK);
			arp_send(ARPOP_REQUEST, ETH_P_ARP, address, dev, saddr,
				 neigh->ha, dev->dev_addr, NULL);
			read_unlock_bh(&neigh->lock);
			goto neighbour_invalid;
		}

		eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
		eth->h_proto = (ETH_P_802_3 != skb->protocol) ?
			       htons(skb->protocol) : htons(skb->len);
		memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
		memcpy(eth->h_dest, neigh->ha, dev->addr_len);
		skb->mac.raw = skb->data;
		skb->mac_len = ETH_HLEN;
		read_unlock_bh(&neigh->lock);
		/* NOTE(review): neigh is not released on this path —
		 * confirm __neigh_arp_lookup() does not take a reference
		 * here (mpls_echo_input releases its neighbour). */

		dev_queue_xmit(skb);
		return MPLS_L2VPN_SUCCESS;
	}

nexthop_invalid:
	MPLS_DEBUG_COUNTER_INC(vpws_input_nexthop_invalid);
	goto drop;
oifindex_invalid:
	MPLS_DEBUG_COUNTER_INC(vpws_input_oifindex_invalid);
	goto drop;
neighbour_invalid:
	MPLS_DEBUG_COUNTER_INC(vpws_input_neigh_invalid);
	goto drop;
create_neigh_failed:
	MPLS_DEBUG_COUNTER_INC(vpws_input_neigh_create);
	goto drop;
add_vlan_tag_failed:
	MPLS_DEBUG_COUNTER_INC(vpws_input_add_vlan_tag);
drop:
	kfree_skb(skb);
	return MPLS_L2VPN_DROP;
}
/*
 * vrf_process_v4_outbound() - route and transmit an IPv4 packet sent
 * through a VRF device.
 *
 * Performs a route lookup in the VRF's table; locally destined traffic
 * (dst device is loopback or the VRF itself) is short-circuited back
 * into the Rx path via the VRF's cached local dst, everything else is
 * sent through vrf_ip_local_out(). Returns a NET_XMIT_* code.
 */
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.flowi4_proto = ip4h->protocol,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();
		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}
		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	/* No source address yet: use a link-scope address of the egress
	 * device. */
	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

/* Dispatch an outgoing frame from the VRF device by L3 protocol. */
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

/* ndo_start_xmit handler: transmit and account per-CPU tx stats. */
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		/* NOTE(review): skb->len is read here after the xmit path
		 * may have consumed the skb — as found in this copy;
		 * upstream caches the length beforehand. Confirm. */
		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}