void ip_rcv(void)
{
    uint16_t len = pkt_left;

    SNMP(ip_in_receives);
    if (!pkt_pull(&iph, sizeof(struct iphdr)))
        return;
    plen = ntohs(iph.tot_len);
    /* FIXME: for speed fold ihl/version and be smarter */
    if (iph.ihl < 5 || iph.version != 4 || len < plen) {
        SNMP(ip_in_hdr_errors);
        return;
    }
    plen -= sizeof(struct iphdr);
    if (pkt_len > plen)
        pkt_len = plen;
    /* FIXME: checksum */
    if (iph.ihl != 5)
        if (ip_options())
            return;
    /* No frags for now (memory limits on 8bit) */
    if (iph.frag_off)
        return;
    if (iph.daddr == 0xFFFFFFFF)
        pkt_type = PKT_BROADCAST;
    else if (MULTICAST(iph.daddr))
        pkt_type = PKT_MULTICAST;
    else if (iph.daddr == ip_addr || LOOPBACK(iph.daddr))
        pkt_type = PKT_HOST;
    else
        /* No forwarding so we don't have to worry about martians either */
        return;
    /* FIXME: raw sockets ?? */
    if (iph.protocol == IPPROTO_TCP)
        tcp_rcv();
    else if (iph.protocol == IPPROTO_UDP)
        udp_rcv();
    else if (iph.protocol == IPPROTO_ICMP)
        icmp_rcv();
    else
        icmp_send_unreach(ICMP_DEST_UNREACH, ICMP_PROT_UNREACH);
}
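/*
 * The snippets above and below test address classes with LOOPBACK() and
 * MULTICAST() instead of open-coding the bit masks.  A minimal sketch of
 * what such helpers look like, assuming the classic <linux/in.h>-style
 * definitions over network-byte-order 32-bit addresses (the small-stack
 * versions used here are assumed to be equivalent):
 */
#include <arpa/inet.h>

#define LOOPBACK(x)   (((x) & htonl(0xff000000)) == htonl(0x7f000000))  /* 127.0.0.0/8 */
#define MULTICAST(x)  (((x) & htonl(0xf0000000)) == htonl(0xe0000000))  /* 224.0.0.0/4 */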
static void
connected_announce (struct interface *ifp, struct connected *ifc)
{
  if (!ifc)
    return;

  listnode_add (ifp->connected, ifc);

  /* Update interface address information to protocol daemon. */
  if (! CHECK_FLAG (ifc->conf, ZEBRA_IFC_REAL))
    {
      if (ifc->address->family == AF_INET)
        if_subnet_add (ifp, ifc);

      SET_FLAG (ifc->conf, ZEBRA_IFC_REAL);

      /* gjd: added for Rtsuit restart; without it the address is not
       * written out by "show running".  The RTMD_RESTART_IP_CONFIG flag
       * is set from the kernel netlink information carried in
       * IFA_BROADCAST.  This is needed because when VRRP is set up the
       * virtual IP is inserted but no IFA_BROADCAST arrives from the
       * kernel.  2011-12-20 pm 7:00 */
      if ((!CHECK_FLAG (ifc->conf, ZEBRA_IFC_CONFIGURED))
          && (keep_kernel_mode == 1)
          && (CHECK_FLAG (ifc->ip_config, RTMD_RESTART_IP_CONFIG)))
        {
          SET_FLAG (ifc->conf, ZEBRA_IFC_CONFIGURED);
          /* zlog_err("%s : line %d ifc->conf(%u), ifc->ipconfig(%u).\n",
                      __func__, __LINE__, ifc->conf, ifc->ip_config); */
        }

      if (ifp && (strncmp (ifp->name, "lo", 2) == 0)
          && (!LOOPBACK (ifc->address->u.prefix4.s_addr)))
        SET_FLAG (ifc->conf, ZEBRA_IFC_CONFIGURED);

      zebra_interface_address_add_update (ifp, ifc);

      if (if_is_up (ifp))
        {
          if (ifc->address->family == AF_INET)
            connected_up_ipv4 (ifp, ifc);
#ifdef HAVE_IPV6
          else
            connected_up_ipv6 (ifp, ifc);
#endif
        }
    }
}
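/*
 * CHECK_FLAG()/SET_FLAG() above are the usual bit-flag helpers from
 * Zebra/Quagga's lib/zebra.h.  A sketch for reference, assumed to match
 * the tree this snippet comes from:
 */
#define CHECK_FLAG(V,F)   ((V) & (F))
#define SET_FLAG(V,F)     (V) |= (F)
#define UNSET_FLAG(V,F)   (V) &= ~(F)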
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
    struct in_device *in_dev = __in_dev_get_rtnl(dev);

    ASSERT_RTNL();

    if (!in_dev) {
        inet_free_ifa(ifa);
        return -ENOBUFS;
    }
    ipv4_devconf_setall(in_dev);
    if (ifa->ifa_dev != in_dev) {
        WARN_ON(ifa->ifa_dev);
        in_dev_hold(in_dev);
        ifa->ifa_dev = in_dev;
    }
    if (LOOPBACK(ifa->ifa_local))
        ifa->ifa_scope = RT_SCOPE_HOST;
    return inet_insert_ifa(ifa);
}
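/*
 * Forcing RT_SCOPE_HOST on loopback addresses matters because the scope
 * bounds how far an address is valid for routing and source selection.
 * A sketch of the scope values from <linux/rtnetlink.h>, quoted from
 * memory and therefore indicative rather than authoritative:
 */
enum rt_scope_t {
    RT_SCOPE_UNIVERSE = 0,    /* valid everywhere */
    /* user-defined values sit between 1 and 199 */
    RT_SCOPE_SITE     = 200,
    RT_SCOPE_LINK     = 253,  /* valid only on this link */
    RT_SCOPE_HOST     = 254,  /* valid only inside this host (loopback) */
    RT_SCOPE_NOWHERE  = 255,  /* reserved for non-existing destinations */
};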
int ip_output(void *pbuf, uint16_t plen, void *dbuf, uint16_t dlen)
{
    oiph.id = htons_inc(ip_id);
    oiph.ttl = IP_TTL;
    oiph.check = 0;
    oiph.version = 4;
    /* Options not supported */
    oiph.ihl = 5;
    oiph.check = ip_checksum(&oiph, sizeof(oiph));

    output_begin();                   /* Set up output buffer (space left for header) */
    output_add(&oiph, 4 * oiph.ihl);
    output_add(pbuf, plen);
    output_add(dbuf, dlen);

    if (LOOPBACK(oiph.daddr) || oiph.daddr == ip_addr)
        return loopback_queue();
    else
        return mac_queue();
    /* We do blocking writes, when this code returns the buffer is on the
       wire and we don't have to fret about re-use. Does mean slip has to
       be careful to buffer the receive side while queueing output */
}
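/*
 * ip_output() above relies on an ip_checksum() helper.  It is assumed to
 * be the standard one's-complement Internet checksum (RFC 1071) over the
 * header; a minimal sketch under that assumption (the real routine in
 * this stack may differ in detail):
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t ip_checksum(const void *data, size_t len)
{
    const uint16_t *p = data;
    uint32_t sum = 0;

    /* len is a header length here, always a multiple of 4 for IPv4 */
    while (len > 1) {
        sum += *p++;            /* sum 16-bit words as they sit in memory */
        len -= 2;
    }
    while (sum >> 16)           /* fold carries back into the low 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;      /* one's complement; store back unchanged */
}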
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
    struct in_device *in_dev = __in_dev_get(dev);

    ASSERT_RTNL();

    if (!in_dev) {
        in_dev = inetdev_init(dev);
        if (!in_dev) {
            inet_free_ifa(ifa);
            return -ENOBUFS;
        }
    }
    if (ifa->ifa_dev != in_dev) {
        BUG_TRAP(!ifa->ifa_dev);
        in_dev_hold(in_dev);
        ifa->ifa_dev = in_dev;
    }
    if (LOOPBACK(ifa->ifa_local))
        ifa->ifa_scope = RT_SCOPE_HOST;
    return inet_insert_ifa(ifa);
}
static int arp_process(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct in_device *in_dev = in_dev_get(dev);
    struct arphdr *arp;
    unsigned char *arp_ptr;
    struct rtable *rt;
    unsigned char *sha, *tha;
    u32 sip, tip;
    u16 dev_type = dev->type;
    int addr_type;
    struct neighbour *n;

    /* arp_rcv below verifies the ARP header and verifies the device
     * is ARP'able.
     */
    if (in_dev == NULL)
        goto out;

    arp = skb->nh.arph;

    switch (dev_type) {
    default:
        if (arp->ar_pro != htons(ETH_P_IP) ||
            htons(dev_type) != arp->ar_hrd)
            goto out;
        break;
#ifdef CONFIG_NET_ETHERNET
    case ARPHRD_ETHER:
#endif
#ifdef CONFIG_TR
    case ARPHRD_IEEE802_TR:
#endif
#ifdef CONFIG_FDDI
    case ARPHRD_FDDI:
#endif
#ifdef CONFIG_NET_FC
    case ARPHRD_IEEE802:
#endif
#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \
    defined(CONFIG_FDDI) || defined(CONFIG_NET_FC)
        /*
         * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
         * devices, according to RFC 2625) devices will accept ARP
         * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
         * This is the case also of FDDI, where the RFC 1390 says that
         * FDDI devices should accept ARP hardware of (1) Ethernet,
         * however, to be more robust, we'll accept both 1 (Ethernet)
         * or 6 (IEEE 802.2)
         */
        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP))
            goto out;
        break;
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
    case ARPHRD_AX25:
        if (arp->ar_pro != htons(AX25_P_IP) ||
            arp->ar_hrd != htons(ARPHRD_AX25))
            goto out;
        break;
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
    case ARPHRD_NETROM:
        if (arp->ar_pro != htons(AX25_P_IP) ||
            arp->ar_hrd != htons(ARPHRD_NETROM))
            goto out;
        break;
#endif
#endif
    }

    /* Understand only these message types */
    if (arp->ar_op != htons(ARPOP_REPLY) &&
        arp->ar_op != htons(ARPOP_REQUEST))
        goto out;

    /*
     *  Extract fields
     */
    arp_ptr = (unsigned char *)(arp + 1);
    sha = arp_ptr;
    arp_ptr += dev->addr_len;
    memcpy(&sip, arp_ptr, 4);
    arp_ptr += 4;
    tha = arp_ptr;
    arp_ptr += dev->addr_len;
    memcpy(&tip, arp_ptr, 4);

    /*
     * Check for bad requests for 127.x.x.x and requests for multicast
     * addresses.  If this is one such, delete it.
     */
    if (LOOPBACK(tip) || MULTICAST(tip))
        goto out;

    /*
     * Special case: We must set Frame Relay source Q.922 address
     */
    if (dev_type == ARPHRD_DLCI)
        sha = dev->broadcast;

    /*
     * Process entry.  The idea here is we want to send a reply if it is a
     * request for us or if it is a request for someone else that we hold
     * a proxy for.  We want to add an entry to our cache if it is a reply
     * to us or if it is a request for our address.
     * (The assumption for this last is that if someone is requesting our
     * address, they are probably intending to talk to us, so it saves time
     * if we cache their address.  Their address is also probably not in
     * our cache, since ours is not in their cache.)
     *
     * Putting this another way, we only care about replies if they are to
     * us, in which case we add them to the cache.  For requests, we care
     * about those for us and those for our proxies.  We reply to both,
     * and in the case of requests for us we add the requester to the arp
     * cache.
     */

    /* Special case: IPv4 duplicate address detection packet (RFC2131) */
    if (sip == 0) {
        if (arp->ar_op == htons(ARPOP_REQUEST) &&
            inet_addr_type(tip) == RTN_LOCAL &&
            !arp_ignore(in_dev, dev, sip, tip))
            arp_send(ARPOP_REPLY, ETH_P_ARP, tip, dev, tip, sha,
                     dev->dev_addr, dev->dev_addr);
        goto out;
    }

    if (arp->ar_op == htons(ARPOP_REQUEST) &&
        ip_route_input(skb, tip, sip, 0, dev) == 0) {

        rt = (struct rtable *)skb->dst;
        addr_type = rt->rt_type;

        if (addr_type == RTN_LOCAL) {
            n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
            if (n) {
                int dont_send = 0;

                if (!dont_send)
                    dont_send |= arp_ignore(in_dev, dev, sip, tip);
                if (!dont_send && IN_DEV_ARPFILTER(in_dev))
                    dont_send |= arp_filter(sip, tip, dev);
                if (!dont_send)
                    arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
                             dev->dev_addr, sha);

                neigh_release(n);
            }
            goto out;
        } else if (IN_DEV_FORWARD(in_dev)) {
            if ((rt->rt_flags & RTCF_DNAT) ||
                (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
                 (arp_fwd_proxy(in_dev, rt) ||
                  pneigh_lookup(&arp_tbl, &tip, dev, 0)))) {
                n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
                if (n)
                    neigh_release(n);

                if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
                    skb->pkt_type == PACKET_HOST ||
                    in_dev->arp_parms->proxy_delay == 0) {
                    arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
                             dev->dev_addr, sha);
                } else {
                    pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
                    in_dev_put(in_dev);
                    return 0;
                }
                goto out;
            }
        }
    }

    /* Update our ARP tables */

    n = __neigh_lookup(&arp_tbl, &sip, dev, 0);

#ifdef CONFIG_IP_ACCEPT_UNSOLICITED_ARP
    /* Unsolicited ARP is not accepted by default.
       It is possible, that this option should be enabled for some
       devices (strip is candidate)
     */
    if (n == NULL &&
        arp->ar_op == htons(ARPOP_REPLY) &&
        inet_addr_type(sip) == RTN_UNICAST)
        n = __neigh_lookup(&arp_tbl, &sip, dev, -1);
#endif

    if (n) {
        int state = NUD_REACHABLE;
        int override;

        /* If several different ARP replies follows back-to-back,
           use the FIRST one. It is possible, if several proxy
           agents are active. Taking the first reply prevents
           arp trashing and chooses the fastest router.
         */
        override = time_after(jiffies, n->updated + n->parms->locktime);

        /* Broadcast replies and request packets
           do not assert neighbour reachability.
         */
        if (arp->ar_op != htons(ARPOP_REPLY) ||
            skb->pkt_type != PACKET_HOST)
            state = NUD_STALE;
        neigh_update(n, sha, state,
                     override ? NEIGH_UPDATE_F_OVERRIDE : 0);
        neigh_release(n);
    }

out:
    /* Common exit: drop the device reference and the skb */
    if (in_dev)
        in_dev_put(in_dev);
    kfree_skb(skb);
    return 0;
}
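/*
 * The pointer arithmetic in arp_process() (arp + 1, then stepping by
 * addr_len and 4) works because struct arphdr covers only the fixed part
 * of an ARP packet; the variable-length addresses follow it directly on
 * the wire.  Sketch of the header as in <linux/if_arp.h> and the layout
 * that sha/sip/tha/tip are pulled from:
 *
 *   sender hardware address  (ar_hln bytes)  -> sha
 *   sender protocol address  (ar_pln bytes)  -> sip
 *   target hardware address  (ar_hln bytes)  -> tha
 *   target protocol address  (ar_pln bytes)  -> tip
 */
#include <linux/types.h>

struct arphdr {
    __be16        ar_hrd;     /* format of hardware address */
    __be16        ar_pro;     /* format of protocol address */
    unsigned char ar_hln;     /* length of hardware address */
    unsigned char ar_pln;     /* length of protocol address */
    __be16        ar_op;      /* ARP opcode (command)       */
};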
/***
 * rt_arp_rcv: Receive an ARP request from the device layer.
 */
int rt_arp_rcv(struct rtskb *skb, struct rtnet_device *rtdev,
               struct rtpacket_type *pt)
{
    struct net_device *dev = dev_get_by_rtdev(rtdev);
    struct arphdr *arp = skb->nh.arph;
    unsigned char *arp_ptr = (unsigned char *)(arp + 1);
    unsigned char *sha, *tha;
    u32 sip, tip;
    u16 dev_type = dev->type;

    /*
     * The hardware length of the packet should match the hardware length
     * of the device.  Similarly, the hardware types should match.  The
     * device should be ARP-able.  Also, if pln is not 4, then the lookup
     * is not from an IP number.  We can't currently handle this, so toss
     * it.
     */
    if (arp->ar_hln != dev->addr_len ||
        dev->flags & IFF_NOARP ||
        skb->pkt_type == PACKET_OTHERHOST ||
        skb->pkt_type == PACKET_LOOPBACK ||
        arp->ar_pln != 4)
        goto out;

    switch (dev_type) {
    default:
        if (arp->ar_pro != __constant_htons(ETH_P_IP) &&
            htons(dev_type) != arp->ar_hrd)
            goto out;
        break;

    case ARPHRD_ETHER:
        /*
         * ETHERNET devices will accept ARP hardware types of either
         * 1 (Ethernet) or 6 (IEEE 802.2).
         */
        if (arp->ar_hrd != __constant_htons(ARPHRD_ETHER) &&
            arp->ar_hrd != __constant_htons(ARPHRD_IEEE802)) {
            goto out;
        }
        if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
            goto out;
        }
        break;
    }

    /* Understand only these message types */
    if (arp->ar_op != __constant_htons(ARPOP_REPLY) &&
        arp->ar_op != __constant_htons(ARPOP_REQUEST))
        goto out;

    /*
     * Extract fields
     */
    sha = arp_ptr;
    arp_ptr += dev->addr_len;
    memcpy(&sip, arp_ptr, 4);
    arp_ptr += 4;
    tha = arp_ptr;
    arp_ptr += dev->addr_len;
    memcpy(&tip, arp_ptr, 4);

    /*
     * Check for bad requests for 127.x.x.x and requests for multicast
     * addresses.  If this is one such, delete it.
     */
    if (LOOPBACK(tip) || MULTICAST(tip))
        goto out;

    /* Frame Relay: use the Q.922 broadcast address as the source */
    if (dev_type == ARPHRD_DLCI)
        sha = dev->broadcast;

    /*
     * Process entry.  The idea here is we want to send a reply if it is a
     * request for us or if it is a request for someone else that we hold
     * a proxy for.  We want to add an entry to our cache if it is a reply
     * to us or if it is a request for our address.
     * (The assumption for this last is that if someone is requesting our
     * address, they are probably intending to talk to us, so it saves time
     * if we cache their address.  Their address is also probably not in
     * our cache, since ours is not in their cache.)
     *
     * Putting this another way, we only care about replies if they are to
     * us, in which case we add them to the cache.  For requests, we care
     * about those for us and those for our proxies.  We reply to both,
     * and in the case of requests for us we add the requester to the arp
     * cache.
     */
    if (rt_ip_route_input(skb, tip, sip, rtdev) == 0) {
        rt_arp_table_add(sip, sha);
        if (arp->ar_op == __constant_htons(ARPOP_REQUEST))
            rt_arp_send(ARPOP_REPLY, ETH_P_ARP, sip, rtdev, tip, sha,
                        dev->dev_addr, sha);
    }

out:
    kfree_rtskb(skb);
    return 0;
}
static void arp_reply(struct sk_buff *skb)
{
    struct netpoll_info *npinfo = skb->dev->npinfo;
    struct arphdr *arp;
    unsigned char *arp_ptr;
    int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
    __be32 sip, tip;
    unsigned char *sha;
    struct sk_buff *send_skb;
    struct netpoll *np = NULL;

    if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
        np = npinfo->rx_np;
    if (!np)
        return;

    /* No arp on this interface */
    if (skb->dev->flags & IFF_NOARP)
        return;

    if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
                             (2 * skb->dev->addr_len) +
                             (2 * sizeof(u32)))))
        return;

    skb_reset_network_header(skb);
    skb_reset_transport_header(skb);
    arp = arp_hdr(skb);

    if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
         arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
        arp->ar_pro != htons(ETH_P_IP) ||
        arp->ar_op != htons(ARPOP_REQUEST))
        return;

    arp_ptr = (unsigned char *)(arp + 1);
    /* save the location of the src hw addr */
    sha = arp_ptr;
    arp_ptr += skb->dev->addr_len;
    memcpy(&sip, arp_ptr, 4);
    arp_ptr += 4;
    /* if we actually cared about dst hw addr, it would get copied here */
    arp_ptr += skb->dev->addr_len;
    memcpy(&tip, arp_ptr, 4);

    /* Should we ignore arp? */
    if (tip != htonl(np->local_ip) ||
        LOOPBACK(tip) || MULTICAST(tip))
        return;

    size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
    send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
                        LL_RESERVED_SPACE(np->dev));

    if (!send_skb)
        return;

    skb_reset_network_header(send_skb);
    arp = (struct arphdr *) skb_put(send_skb, size);
    send_skb->dev = skb->dev;
    send_skb->protocol = htons(ETH_P_ARP);

    /* Fill the device header for the ARP frame */
    if (np->dev->hard_header &&
        np->dev->hard_header(send_skb, skb->dev, ptype,
                             sha, np->local_mac, send_skb->len) < 0) {
        kfree_skb(send_skb);
        return;
    }

    /*
     * Fill out the arp protocol part.
     *
     * we only support ethernet device type,
     * which (according to RFC 1390) should always equal 1 (Ethernet).
     */

    arp->ar_hrd = htons(np->dev->type);
    arp->ar_pro = htons(ETH_P_IP);
    arp->ar_hln = np->dev->addr_len;
    arp->ar_pln = 4;
    arp->ar_op = htons(type);

    arp_ptr = (unsigned char *)(arp + 1);
    memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
    arp_ptr += np->dev->addr_len;
    memcpy(arp_ptr, &tip, 4);
    arp_ptr += 4;
    memcpy(arp_ptr, sha, np->dev->addr_len);
    arp_ptr += np->dev->addr_len;
    memcpy(arp_ptr, &sip, 4);

    netpoll_send_skb(np, send_skb);
}
void ssa_nodse_stat_entrance(struct sk_buff *skb, u32 dir)
{
    u32 packet_len = (u32)ntohs(skb->nh.iph->tot_len) + SSA_MAC_HEAD_LENGTH;
    s32 in_if_num;
    s32 out_if_num;
    u32 src_ip = skb->nh.iph->saddr;
    u32 dst_ip = skb->nh.iph->daddr;
    u32 dst_match_flag = 0;
    u32 src_match_flag = 0;
    u32 app_id = skb_get_application_id(skb);
    s32 i;
    struct net_device dev;
    u32 dev_type = 0;

    if (app_id >= SSA_APP_NUM_MAX) {
        app_id = 127;
    }

    /* Only account for traffic between plain unicast addresses */
    if (LOOPBACK(src_ip) || MULTICAST(src_ip) || BADCLASS(src_ip) ||
        ZERONET(src_ip) || LOCAL_MCAST(src_ip) || BROADCAST(src_ip) ||
        LOOPBACK(dst_ip) || MULTICAST(dst_ip) || BADCLASS(dst_ip) ||
        ZERONET(dst_ip) || LOCAL_MCAST(dst_ip) || BROADCAST(dst_ip)) {
        return;
    }

    if (SSA_ON == g_ssa_conf.if_log_switch) {
        in_if_num = if_dev_get_phy_serial_num(skb->in_if);
        dev.ifindex = skb->in_if;
        eth_dev_ioctl(&dev, (void *)&dev_type, SIOCGPRIVATEIFTYPE);

        if (dir == STREAM_IN || DEV_IFTYPE_SNIFF == dev_type) {
            ssa_if_stat_entrance(packet_len, in_if_num, app_id, STREAM_IN);
            if (SSA_ON == g_ssa_firstpage_info.stat_flag) {
                ssa_firstpage_data_stat(packet_len, in_if_num, STREAM_IN);
            }
        } else {
            out_if_num = if_dev_get_phy_serial_num(skb->out_if);
            ssa_if_stat_entrance(packet_len, in_if_num, app_id, STREAM_IN);
            ssa_if_stat_entrance(packet_len, out_if_num, app_id, STREAM_OUT);
            if (SSA_ON == g_ssa_firstpage_info.stat_flag) {
                ssa_firstpage_data_stat(packet_len, in_if_num, STREAM_IN);
                ssa_firstpage_data_stat(packet_len, out_if_num, STREAM_OUT);
            }
        }
    }

    if (g_ssa_conf.ip_log_switch == SSA_ON && dir == STREAM_OUT) {
        for (i = 0; (i < SSA_USER_GROUP_NUM) &&
                    (g_ssa_conf.stat_group_id[i] != 0); i++) {
            if (1 == dst_match_flag && 1 == src_match_flag) {
                return;
            }
            if ((0 == src_match_flag) &&
                (net_user_ip_in_list ==
                 net_user_get_by_id_ip(g_ssa_conf.stat_group_id[i], src_ip))) {
                src_match_flag = 1;
                ssa_ip_stat_entrance(packet_len, 1, 0, 0, src_ip,
                                     g_ssa_conf.stat_group_id[i], app_id,
                                     SSA_STREAM_UP, NULL);
            }
            if ((0 == dst_match_flag) &&
                (net_user_ip_in_list ==
                 net_user_get_by_id_ip(g_ssa_conf.stat_group_id[i], dst_ip))) {
                dst_match_flag = 1;
                ssa_ip_stat_entrance(packet_len, 1, 0, 0, dst_ip,
                                     g_ssa_conf.stat_group_id[i], app_id,
                                     SSA_STREAM_DOWN, NULL);
            }
        }
    }
    return;
}
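/*
 * Besides LOOPBACK() and MULTICAST() (sketched earlier), this snippet
 * uses a few more classful tests.  The first three match the old
 * <linux/in.h> macros; BROADCAST() is not a standard kernel macro, so an
 * all-ones limited-broadcast test is assumed here:
 */
#define BADCLASS(x)     (((x) & htonl(0xf0000000)) == htonl(0xf0000000))  /* 240.0.0.0/4 */
#define ZERONET(x)      (((x) & htonl(0xff000000)) == htonl(0x00000000))  /* 0.0.0.0/8 */
#define LOCAL_MCAST(x)  (((x) & htonl(0xffffff00)) == htonl(0xe0000000))  /* 224.0.0.0/24 */
#define BROADCAST(x)    ((x) == htonl(0xffffffff))                        /* assumed: 255.255.255.255 */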
int ip_build_xmit(struct sock *sk,
                  void getfrag(const void *, __u32, char *, unsigned int, unsigned int),
                  const void *frag, unsigned short int length,
                  __u32 daddr, __u32 user_saddr,
                  struct options *opt, int flags, int type, int noblock)
{
    struct rtable *rt;
    unsigned int fraglen, maxfraglen, fragheaderlen;
    int offset, mf;
    __u32 saddr;
    unsigned short id;
    struct iphdr *iph;
    __u32 raddr;
    struct device *dev = NULL;
    struct hh_cache *hh = NULL;
    int nfrags = 0;
    __u32 true_daddr = daddr;

    if (opt && opt->srr && !sk->ip_hdrincl)
        daddr = opt->faddr;

    ip_statistics.IpOutRequests++;

#ifdef CONFIG_IP_MULTICAST
    if (MULTICAST(daddr) && *sk->ip_mc_name) {
        dev = dev_get(sk->ip_mc_name);
        if (!dev)
            return -ENODEV;
        rt = NULL;
        if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
            saddr = sk->saddr;
        else
            saddr = dev->pa_addr;
    } else {
#endif
        rt = ip_check_route(&sk->ip_route_cache, daddr,
                            sk->localroute || (flags & MSG_DONTROUTE) ||
                            (opt && opt->is_strictroute), sk->bound_device);
        if (rt == NULL) {
            ip_statistics.IpOutNoRoutes++;
            return (-ENETUNREACH);
        }
        saddr = rt->rt_src;

        hh = rt->rt_hh;

        if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
            saddr = sk->saddr;

        dev = rt->rt_dev;
#ifdef CONFIG_IP_MULTICAST
    }
    if (rt && !dev)
        dev = rt->rt_dev;
#endif
    if (user_saddr)
        saddr = user_saddr;

    raddr = rt ? rt->rt_gateway : daddr;

    /*
     * Now compute the buffer space we require
     */

    /*
     * Try the simple case first.  This leaves broadcast, multicast,
     * fragmented frames, and by choice RAW frames within 20 bytes of
     * maximum size (rare) to the long path
     */
    if (!sk->ip_hdrincl) {
        length += sizeof(struct iphdr);
        if (opt) {
            /* make sure not to exceed maximum packet size */
            if (0xffff - length < opt->optlen)
                return -EMSGSIZE;
            length += opt->optlen;
        }
    }

    if (length <= dev->mtu && !MULTICAST(daddr) &&
        daddr != 0xFFFFFFFF && daddr != dev->pa_brdaddr) {
        int error;
        struct sk_buff *skb = sock_alloc_send_skb(sk,
                                  length + 15 + dev->hard_header_len,
                                  0, noblock, &error);

        if (skb == NULL) {
            ip_statistics.IpOutDiscards++;
            return error;
        }

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
        skb->free = 1;
        skb->when = jiffies;
        skb->sk = sk;
        skb->arp = 0;
        skb->saddr = saddr;
        skb->raddr = raddr;
        skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
        if (hh) {
            skb->arp = 1;
            memcpy(skb_push(skb, dev->hard_header_len),
                   hh->hh_data, dev->hard_header_len);
            if (!hh->hh_uptodate) {
                skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
                printk("ip_build_xmit: hh miss %08x via %08x\n",
                       rt->rt_dst, rt->rt_gateway);
#endif
            }
        } else if (dev->hard_header) {
            if (dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, 0) > 0)
                skb->arp = 1;
        } else
            skb->arp = 1;

        skb->ip_hdr = iph = (struct iphdr *)skb_put(skb, length);
        dev_lock_list();
        if (!sk->ip_hdrincl) {
            iph->version = 4;
            iph->ihl = 5;
            iph->tos = sk->ip_tos;
            iph->tot_len = htons(length);
            iph->id = htons(ip_id_count++);
            iph->frag_off = 0;
            iph->ttl = sk->ip_ttl;
            iph->protocol = type;
            iph->saddr = saddr;
            iph->daddr = daddr;
            if (opt) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, true_daddr, dev->pa_addr, 0);
            }
            iph->check = 0;
            iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
            getfrag(frag, saddr, ((char *)iph) + iph->ihl * 4, 0,
                    length - iph->ihl * 4);
        }
/*
 * This routine builds the appropriate hardware/IP headers for an
 * outgoing frame.  It assumes that if *dev != NULL then the
 * protocol knows what it's doing, otherwise it uses the
 * routing/ARP tables to select a device struct.
 */
int ip_build_header(struct sk_buff *skb, __u32 saddr, __u32 daddr,
                    struct device **dev, int type, struct options *opt,
                    int len, int tos, int ttl, struct rtable **rp)
{
    struct rtable *rt;
    __u32 raddr;
    int tmp;
    struct iphdr *iph;
    __u32 final_daddr = daddr;

    if (opt && opt->srr)
        daddr = opt->faddr;

    /*
     * See if we need to look up the device.
     */

#ifdef CONFIG_IP_MULTICAST
    if (MULTICAST(daddr) && *dev == NULL && skb->sk && *skb->sk->ip_mc_name)
        *dev = dev_get(skb->sk->ip_mc_name);
#endif
    if (rp) {
        rt = ip_check_route(rp, daddr, skb->localroute, *dev);
        /*
         * If rp != NULL rt_put following below should not
         * release route, so that...
         */
        if (rt)
            atomic_inc(&rt->rt_refcnt);
    } else
        rt = ip_rt_route(daddr, skb->localroute, *dev);

    if (*dev == NULL) {
        if (rt == NULL) {
            ip_statistics.IpOutNoRoutes++;
            return (-ENETUNREACH);
        }
        *dev = rt->rt_dev;
    }

    if ((LOOPBACK(saddr) && !LOOPBACK(daddr)) || !saddr)
        saddr = rt ? rt->rt_src : (*dev)->pa_addr;

    raddr = rt ? rt->rt_gateway : daddr;

    if (opt && opt->is_strictroute && rt && (rt->rt_flags & RTF_GATEWAY)) {
        ip_rt_put(rt);
        ip_statistics.IpOutNoRoutes++;
        return -ENETUNREACH;
    }

    /*
     * Now build the MAC header.
     */

    if (type == IPPROTO_TCP)
        tmp = ip_send_room(rt, skb, raddr, len, *dev, saddr);
    else
        tmp = ip_send(rt, skb, raddr, len, *dev, saddr);

    ip_rt_put(rt);

    /*
     * Book keeping
     */

    skb->dev = *dev;
    skb->saddr = saddr;

    /*
     * Now build the IP header.
     */

    /*
     * If we are using IPPROTO_RAW, then we don't need an IP header, since
     * one is being supplied to us by the user
     */

    if (type == IPPROTO_RAW)
        return (tmp);

    /*
     * Build the IP addresses
     */

    if (opt)
        iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr) + opt->optlen);
    else
        iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));

    iph->version  = 4;
    iph->ihl      = 5;
    iph->tos      = tos;
    iph->frag_off = 0;
    iph->ttl      = ttl;
    iph->daddr    = daddr;
    iph->saddr    = saddr;
    iph->protocol = type;
    skb->ip_hdr   = iph;

    if (!opt || !opt->optlen)
        return sizeof(struct iphdr) + tmp;

    iph->ihl += opt->optlen >> 2;
    ip_options_build(skb, opt, final_daddr, (*dev)->pa_addr, 0);
    return iph->ihl * 4 + tmp;
}
/*
 * This routine builds the appropriate hardware/IP headers for an
 * outgoing frame.  It assumes that if *dev != NULL then the
 * protocol knows what it's doing, otherwise it uses the
 * routing/ARP tables to select a device struct.
 */
int ip_build_header(struct sk_buff *skb, unsigned long saddr,
                    unsigned long daddr, struct device **dev, int type,
                    struct options *opt, int len, int tos, int ttl)
{
    static struct options optmem;
    struct iphdr *iph;
    struct rtable *rt;
    unsigned char *buff;
    unsigned long raddr;
    int tmp;
    unsigned long src;

    /*
     * If there is no 'from' address as yet, then make it our loopback
     */

    if (saddr == 0)
        saddr = ip_my_addr();

    buff = skb->data;

    /*
     * See if we need to look up the device.
     */

    if (*dev == NULL) {
        if (skb->localroute)
            rt = ip_rt_local(daddr, &optmem, &src);
        else
            rt = ip_rt_route(daddr, &optmem, &src);
        if (rt == NULL) {
            ip_statistics.IpOutNoRoutes++;
            return (-ENETUNREACH);
        }

        *dev = rt->rt_dev;
        /*
         * If the frame is from us and going off machine it MUST MUST MUST
         * have the output device ip address and never the loopback
         */
        if (LOOPBACK(saddr) && !LOOPBACK(daddr))
            saddr = src; /* rt->rt_dev->pa_addr; */
        raddr = rt->rt_gateway;

        opt = &optmem;
    } else {
        /*
         * We still need the address of the first hop.
         */
        if (skb->localroute)
            rt = ip_rt_local(daddr, &optmem, &src);
        else
            rt = ip_rt_route(daddr, &optmem, &src);
        /*
         * If the frame is from us and going off machine it MUST MUST MUST
         * have the output device ip address and never the loopback
         */
        if (LOOPBACK(saddr) && !LOOPBACK(daddr))
            saddr = src; /* rt->rt_dev->pa_addr; */

        raddr = (rt == NULL) ? 0 : rt->rt_gateway;
    }

    /*
     * No gateway so aim at the real destination
     */

    if (raddr == 0)
        raddr = daddr;

    /*
     * Now build the MAC header.
     */

    tmp = ip_send(skb, raddr, len, *dev, saddr);
    buff += tmp;
    len -= tmp;

    /*
     * Book keeping
     */

    skb->dev = *dev;
    skb->saddr = saddr;
    if (skb->sk)
        skb->sk->saddr = saddr;

    /*
     * Now build the IP header.
     */

    /*
     * If we are using IPPROTO_RAW, then we don't need an IP header, since
     * one is being supplied to us by the user
     */

    if (type == IPPROTO_RAW)
        return (tmp);

    iph = (struct iphdr *)buff;
    iph->version  = 4;
    iph->tos      = tos;
    iph->frag_off = 0;
    iph->ttl      = ttl;
    iph->daddr    = daddr;
    iph->saddr    = saddr;
    iph->protocol = type;
    iph->ihl      = 5;

    /* Setup the IP options. */
#ifdef Not_Yet_Avail
    build_options(iph, opt);
#endif

    return (20 + tmp);  /* IP header plus MAC header size */
}
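/*
 * Both ip_build_header() variants and ip_build_xmit() enforce the same
 * policy: a loopback source address must never appear on a frame that
 * leaves the machine, so it is replaced by the route's preferred source
 * unless the destination itself is loopback.  A hypothetical helper that
 * condenses the rule (the name and signature are illustrative, not from
 * the original sources; it presumes the LOOPBACK() macro sketched earlier):
 */
static __u32 choose_source(__u32 saddr, __u32 daddr, __u32 route_src)
{
    if (!saddr || (LOOPBACK(saddr) && !LOOPBACK(daddr)))
        return route_src;   /* fall back to the outgoing route's source */
    return saddr;           /* caller-supplied address is safe to use   */
}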