Example #1
// Method to send the message to the destination VM using ipv4 and ip_local_out() function
static int cse536_sendPacket(char *data, size_t length)
{
    struct sk_buff *skb;
    struct iphdr *iph;
    struct rtable *rt;
    struct net *netw = &init_net;
    unsigned char *skbdata;

    // create sk_buff and add the user data to it
    skb = alloc_skb(sizeof(struct iphdr) + 4096, GFP_ATOMIC);
    skb_reserve(skb, sizeof(struct iphdr) + 1500);
    skbdata = skb_put(skb, length);
    memcpy(skbdata, data, length);
    // setup space and then add the ip header
    skb_push(skb, sizeof(struct iphdr));
    skb_reset_network_header(skb);
    iph = ip_hdr(skb);
    iph->version  = 4;
    iph->ihl      = 5;
    iph->tos      = 0;
    iph->frag_off = 0;
    iph->ttl      = 64;
    iph->daddr    = my_daddr;
    iph->saddr    = my_saddr;
    iph->protocol = IPPROTO_CSE536;
    iph->id       = htons(1);
    iph->tot_len  = htons(skb->len);
    // get the destination route
    rt = ip_route_output(netw, my_daddr, my_saddr, 0,0);
    skb_dst_set(skb, &rt->dst);
    return ip_local_out(skb);
}
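Example #1 never checks the results of alloc_skb() or ip_route_output(); if the lookup fails, this ip_route_output() variant returns an ERR_PTR and the skb_dst_set() call dereferences it. A minimal sketch with that error handling added is shown below. The helper name is hypothetical; my_daddr, my_saddr and IPPROTO_CSE536 are the module-local symbols from the example, and the single-argument ip_local_out() matches the kernel version the example targets.
// Sketch of the same transmit path with the missing error handling.
// Hypothetical helper; assumes the same module context as Example #1.
static int cse536_sendPacket_checked(char *data, size_t length)
{
    struct sk_buff *skb;
    struct iphdr *iph;
    struct rtable *rt;

    // allocate room for the IP header, the payload and a little headroom
    skb = alloc_skb(sizeof(struct iphdr) + length + 16, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;

    skb_reserve(skb, sizeof(struct iphdr) + 16);
    memcpy(skb_put(skb, length), data, length);

    // build the IPv4 header in front of the payload
    skb_push(skb, sizeof(struct iphdr));
    skb_reset_network_header(skb);
    iph = ip_hdr(skb);
    iph->version  = 4;
    iph->ihl      = 5;
    iph->tos      = 0;
    iph->frag_off = 0;
    iph->ttl      = 64;
    iph->daddr    = my_daddr;
    iph->saddr    = my_saddr;
    iph->protocol = IPPROTO_CSE536;
    iph->id       = htons(1);
    iph->tot_len  = htons(skb->len);

    // the lookup can fail: free the skb instead of dereferencing an ERR_PTR
    rt = ip_route_output(&init_net, my_daddr, my_saddr, 0, 0);
    if (IS_ERR(rt)) {
        kfree_skb(skb);
        return PTR_ERR(rt);
    }
    skb_dst_set(skb, &rt->dst);

    return ip_local_out(skb);
}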
Example #2
static int ipip_tunnel_init(struct device *dev)
{
	struct device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = (struct ip_tunnel*)dev->priv;
	iph = &tunnel->parms.iph;

	ipip_tunnel_init_gen(dev);

	if (iph->daddr) {
		struct rtable *rt;
		if (!ip_route_output(&rt, iph->daddr, iph->saddr, RT_TOS(iph->tos), tunnel->parms.link)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = dev_get_by_index(tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
		dev->mtu = tdev->mtu - sizeof(struct iphdr);
	}
	dev->iflink = tunnel->parms.link;

	return 0;
}
Example #3
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct sock *sk=icmp_socket->sk;
	struct ipcm_cookie ipc;
	struct rtable *rt = (struct rtable*)skb->dst;
	u32 daddr;

	if (ip_options_echo(&icmp_param->replyopts, skb))
		return;

	icmp_param->icmph.checksum=0;
	icmp_param->csum=0;
	icmp_out_count(icmp_param->icmph.type);

	sk->ip_tos = skb->nh.iph->tos;
	daddr = ipc.addr = rt->rt_src;
	ipc.opt = &icmp_param->replyopts;
	if (ipc.opt->srr)
		daddr = icmp_param->replyopts.faddr;
	if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0))
		return;
	ip_build_xmit(sk, icmp_glue_bits, icmp_param, 
		icmp_param->data_len+sizeof(struct icmphdr),
		&ipc, rt, MSG_DONTWAIT);
	ip_rt_put(rt);
}
Example #4
static int igmp_send_report(struct device *dev, u32 group, int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	u32	dst;

	/* According to IGMPv2 specs, LEAVE messages are
	 * sent to all-routers group.
	 */
	dst = group;
	if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;

	if (ip_route_output(&rt, dst, 0, 0, dev->ifindex))
		return -1;
	if (rt->rt_src == 0) {
		ip_rt_put(rt);
		return -1;
	}

	skb=alloc_skb(IGMP_SIZE+dev->hard_header_len+15, GFP_ATOMIC);
	if (skb == NULL) {
		ip_rt_put(rt);
		return -1;
	}

	skb->dst = &rt->u.dst;

	skb_reserve(skb, (dev->hard_header_len+15)&~15);

	skb->nh.iph = iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);

	iph->version  = 4;
	iph->ihl      = (sizeof(struct iphdr)+4)>>2;
	iph->tos      = 0;
	iph->frag_off = 0;
	iph->ttl      = 1;
	iph->daddr    = dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = IPPROTO_IGMP;
	iph->tot_len  = htons(IGMP_SIZE);
	iph->id	      = htons(ip_id_count++);
	((u8*)&iph[1])[0] = IPOPT_RA;
	((u8*)&iph[1])[1] = 4;
	((u8*)&iph[1])[2] = 0;
	((u8*)&iph[1])[3] = 0;
	ip_send_check(iph);

	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	ih->type=type;
	ih->code=0;
	ih->csum=0;
	ih->group=group;
	ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return skb->dst->output(skb);
}
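The comment in Example #4 is the whole trick: callers always pass the multicast group, and the function itself redirects LEAVE messages to the all-routers address before doing the ip_route_output() lookup. A hypothetical call site (not from the original file), using the IGMPv1 report constant from <linux/igmp.h> since the v2 report constant name varies across kernel versions:
/* Hypothetical call sites, not part of the original file. */
static void igmp_join_leave_example(struct device *dev, u32 group)
{
	/* Membership report: the packet is routed to 'group' itself. */
	igmp_send_report(dev, group, IGMP_HOST_MEMBERSHIP_REPORT);

	/* Leave: igmp_send_report() rewrites the destination to
	 * IGMP_ALL_ROUTER before calling ip_route_output(). */
	igmp_send_report(dev, group, IGMP_HOST_LEAVE_MESSAGE);
}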
Example #5
static int arp_req_set(struct net *net, struct arpreq *r,
		       struct net_device *dev)
{
	__be32 ip;
	struct neighbour *neigh;
	int err;

	if (r->arp_flags & ATF_PUBL)
		return arp_req_set_public(net, r, dev);

	ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
	if (r->arp_flags & ATF_PERM)
		r->arp_flags |= ATF_COM;
	if (dev == NULL) {
		struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);

		if (IS_ERR(rt))
			return PTR_ERR(rt);
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!dev)
			return -EINVAL;
	}
	switch (dev->type) {
#if IS_ENABLED(CONFIG_FDDI)
	case ARPHRD_FDDI:
		/*
		 * According to RFC 1390, FDDI devices should accept ARP
		 * hardware types of 1 (Ethernet).  However, to be more
		 * robust, we'll accept hardware types of either 1 (Ethernet)
		 * or 6 (IEEE 802.2).
		 */
		if (r->arp_ha.sa_family != ARPHRD_FDDI &&
		    r->arp_ha.sa_family != ARPHRD_ETHER &&
		    r->arp_ha.sa_family != ARPHRD_IEEE802)
			return -EINVAL;
		break;
#endif
	default:
		if (r->arp_ha.sa_family != dev->type)
			return -EINVAL;
		break;
	}

	neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev);
	err = PTR_ERR(neigh);
	if (!IS_ERR(neigh)) {
		unsigned int state = NUD_STALE;
		if (r->arp_flags & ATF_PERM)
			state = NUD_PERMANENT;
		err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
				   r->arp_ha.sa_data : NULL, state,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
	}
	return err;
}
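Example #5 (like Example #12 further down) uses ip_route_output() purely to discover which device a route to the address would leave through, then drops the route again. The pattern distilled into a hedged sketch: the helper name is hypothetical, and like the example it borrows the device pointer without taking a reference, which is only safe while the caller keeps the device alive (the ARP ioctl path runs under RTNL).
/* Hypothetical helper: which device would be used to reach 'ip'? */
static struct net_device *dev_for_addr(struct net *net, __be32 ip)
{
	struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
	struct net_device *dev;

	if (IS_ERR(rt))
		return NULL;

	dev = rt->dst.dev;	/* borrowed pointer, no dev_hold() here */
	ip_rt_put(rt);		/* the route itself is no longer needed */
	return dev;
}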
Example #6
int igmp_send_report_full(struct net_device *dev, u32 group, int type,
                          u8 respond, u32 dst)
{
    struct sk_buff *skb;
    struct iphdr *iph;
    struct igmphdr *ih;
    struct rtable *rt;

    if (ip_route_output(&rt, dst, 0, 0, dev->ifindex))
        return -1;
    if (rt->rt_src == 0) {
        ip_rt_put(rt);
        return -1;
    }

    skb=alloc_skb(IGMP_SIZE+dev->hard_header_len+15, GFP_ATOMIC);
    if (skb == NULL) {
        ip_rt_put(rt);
        return -1;
    }

    skb->dst = &rt->u.dst;

    skb_reserve(skb, (dev->hard_header_len+15)&~15);

    skb->nh.iph = iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);

    iph->version  = 4;
    iph->ihl      = (sizeof(struct iphdr)+4)>>2;
    iph->tos      = 0;
    iph->frag_off = htons(IP_DF);
    iph->ttl      = 1;
    iph->daddr    = dst;
    iph->saddr    = rt->rt_src;
    iph->protocol = IPPROTO_IGMP;
    iph->tot_len  = htons(IGMP_SIZE);
    ip_select_ident(iph, &rt->u.dst, NULL);
    ((u8*)&iph[1])[0] = IPOPT_RA;
    ((u8*)&iph[1])[1] = 4;
    ((u8*)&iph[1])[2] = 0;
    ((u8*)&iph[1])[3] = 0;
    ip_send_check(iph);

    ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
    ih->type=type;
    ih->code=respond;
    ih->csum=0;
    ih->group=group;
    ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr));

    return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                   output_maybe_reroute);
}
Example #7
/*
 * sfe_cm_find_dev_and_mac_addr()
 *	Find the device and MAC address for a given IPv4 address.
 *
 * Returns true if we find the device and MAC address, otherwise false.
 *
 * We look up the rtable entry for the address and, from its neighbour
 * structure, obtain the hardware address.  This means this function also
 * works if the neighbours are routers too.
 */
static bool sfe_cm_find_dev_and_mac_addr(uint32_t addr, struct net_device **dev, uint8_t *mac_addr)
{
	struct neighbour *neigh;
	struct rtable *rt;
	struct dst_entry *dst;
	struct net_device *mac_dev;

	/*
	 * Look up the rtable entry for the IP address then get the hardware
	 * address from its neighbour structure.  This means this works when the
	 * neighbours are routers too.
	 */
	rt = ip_route_output(&init_net, addr, 0, 0, 0);
	if (unlikely(IS_ERR(rt))) {
		return false;
	}

	dst = (struct dst_entry *)rt;

	rcu_read_lock();
	neigh = dst_get_neighbour_noref(dst);
	if (unlikely(!neigh)) {
		rcu_read_unlock();
		dst_release(dst);
		return false; 
	}

	if (unlikely(!(neigh->nud_state & NUD_VALID))) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	mac_dev = neigh->dev;
	if (!mac_dev) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	memcpy(mac_addr, neigh->ha, (size_t)mac_dev->addr_len);

	dev_hold(mac_dev);
	*dev = mac_dev;
	rcu_read_unlock();

	dst_release(dst);

	return true;
}
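Example #7 takes a reference on the device it returns (dev_hold()), so a caller has to balance it. A minimal hypothetical caller, assuming an Ethernet-sized MAC buffer:
/* Hypothetical caller, not part of the original module. */
static void sfe_cm_example_lookup(uint32_t addr)
{
	struct net_device *dev;
	uint8_t mac[ETH_ALEN];

	if (!sfe_cm_find_dev_and_mac_addr(addr, &dev, mac))
		return;	/* no route, or the neighbour is not valid yet */

	pr_info("%pI4 via %s (%pM)\n", &addr, dev->name, mac);
	dev_put(dev);	/* balance the dev_hold() inside the helper */
}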
Example #8
static int ip_masq_user_maddr(struct ip_masq_user *ums)
{
	struct device *dev;
	struct rtable *rt;
	int ret = -EINVAL;
	u32 rt_daddr, rt_saddr;
	u32 tos;

	/*
	 *	Did specify masq address.
	 */
	if (ums->maddr)
		return 0;

	/*
	 *	Select address to use for routing query
	 */

	rt_daddr = ums->rt_daddr? ums->rt_daddr : ums->daddr;
	rt_saddr = ums->rt_saddr? ums->rt_saddr : ums->saddr;


	/*
	 *	No address for routing, cannot continue
	 */
	if (rt_daddr == 0) {
		IP_MASQ_DEBUG(1-debug, "cannot setup maddr with daddr=%lX, rt_addr=%lX\n",
			     ntohl(ums->daddr), ntohl(ums->rt_daddr));
		return -EINVAL;
	}

	/*
	 *	Find out rt device 
	 */

	rt_saddr = 0; 
	tos = RT_TOS(ums->ip_tos) | RTO_CONN;

	if ((ret=ip_route_output(&rt, rt_daddr, rt_saddr, tos, 0 /* dev */))) {
		IP_MASQ_DEBUG(0-debug, "could not setup maddr for routing daddr=%lX, saddr=%lX\n",
			     ntohl(rt_daddr), ntohl(rt_saddr));
		return ret;
	}
	dev = rt->u.dst.dev;
	ums->maddr = ip_masq_select_addr(dev, rt->rt_gateway, RT_SCOPE_UNIVERSE);

	IP_MASQ_DEBUG(1-debug, "did setup maddr=%lX\n", ntohl(ums->maddr));
	ip_rt_put(rt);
	return 0;
}
Example #9
static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
{
	struct rtable *rt;
	int flag = 0;
	/*unsigned long now; */
	struct net *net = dev_net(dev);

	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
	if (IS_ERR(rt))
		return 1;
	if (rt->dst.dev != dev) {
		__NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
		flag = 1;
	}
	ip_rt_put(rt);
	return flag;
}
Example #10
static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
{
	struct rtable *rt;
	int flag = 0;
	
	struct net *net = dev_net(dev);

	rt = ip_route_output(net, sip, tip, 0, 0);
	if (IS_ERR(rt))
		return 1;
	if (rt->dst.dev != dev) {
		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
		flag = 1;
	}
	ip_rt_put(rt);
	return flag;
}
Example #11
static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
{
	struct neighbour *neigh;
	struct atmarp_entry *entry;
	int error;
	struct clip_vcc *clip_vcc;
	struct rtable *rt;

	if (vcc->push != clip_push) {
		pr_warning("non-CLIP VCC\n");
		return -EBADF;
	}
	clip_vcc = CLIP_VCC(vcc);
	if (!ip) {
		if (!clip_vcc->entry) {
			pr_err("hiding hidden ATMARP entry\n");
			return 0;
		}
		pr_debug("remove\n");
		unlink_clip_vcc(clip_vcc);
		return 0;
	}
	rt = ip_route_output(&init_net, ip, 0, 1, 0);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
	ip_rt_put(rt);
	if (!neigh)
		return -ENOMEM;
	entry = neighbour_priv(neigh);
	if (entry != clip_vcc->entry) {
		if (!clip_vcc->entry)
			pr_debug("add\n");
		else {
			pr_debug("update\n");
			unlink_clip_vcc(clip_vcc);
		}
		link_vcc(clip_vcc, entry);
	}
	error = neigh_update(neigh, llc_oui, NUD_PERMANENT,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
	neigh_release(neigh);
	return error;
}
Example #12
static int arp_req_delete(struct net *net, struct arpreq *r,
			  struct net_device *dev)
{
	__be32 ip;

	if (r->arp_flags & ATF_PUBL)
		return arp_req_delete_public(net, r, dev);

	ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
	if (dev == NULL) {
		struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!dev)
			return -EINVAL;
	}
	return arp_invalidate(dev, ip);
}
Example #13
int clip_setentry(struct atm_vcc *vcc,u32 ip)
{
	struct neighbour *neigh;
	struct atmarp_entry *entry;
	int error;
	struct clip_vcc *clip_vcc;
	struct rtable *rt;

	if (vcc->push != clip_push) {
		printk(KERN_WARNING "clip_setentry: non-CLIP VCC\n");
		return -EBADF;
	}
	clip_vcc = CLIP_VCC(vcc);
	if (!ip) {
		if (!clip_vcc->entry) {
			printk(KERN_ERR "hiding hidden ATMARP entry\n");
			return 0;
		}
		DPRINTK("setentry: remove\n");
		unlink_clip_vcc(clip_vcc);
		return 0;
	}
	error = ip_route_output(&rt,ip,0,1,0);
	if (error) return error;
	neigh = __neigh_lookup(&clip_tbl,&ip,rt->u.dst.dev,1);
	ip_rt_put(rt);
	if (!neigh)
		return -ENOMEM;
	entry = NEIGH2ENTRY(neigh);
	if (entry != clip_vcc->entry) {
		if (!clip_vcc->entry) DPRINTK("setentry: add\n");
		else {
			DPRINTK("setentry: update\n");
			unlink_clip_vcc(clip_vcc);
		}
		link_vcc(clip_vcc,entry);
	}
	error = neigh_update(neigh,llc_oui,NUD_PERMANENT,1,0);
	neigh_release(neigh);
	return error;
}
Example #14
static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
{
	struct rtable *rt;
	struct device *dev = NULL;

	if (imr->imr_address.s_addr) {
		dev = ip_dev_find(imr->imr_address.s_addr);
		if (!dev)
			return NULL;
	}

	if (!dev && !ip_route_output(&rt, imr->imr_multiaddr.s_addr, 0, 0, 0)) {
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
	}
	if (dev) {
		imr->imr_ifindex = dev->ifindex;
		return dev->ip_ptr;
	}
	return NULL;
}
Example #15
static int route_mirror(struct sk_buff *skb)
{
        struct iphdr *iph = skb->nh.iph;
	struct rtable *rt;

	/* Backwards */
	if (ip_route_output(&rt, iph->saddr, iph->daddr,
			    RT_TOS(iph->tos) | RTO_CONN,
			    0)) {
		return 0;
	}

	/* check if the interface we are leaving by is the same as the
           one we arrived on */
	if (skb->dev == rt->u.dst.dev) {
		/* Drop old route. */
		dst_release(skb->dst);
		skb->dst = &rt->u.dst;
		return 1;
	}
	return 0;
}
Example #16
// this method will send the message to the destination machine using ipv4
static int cse536_sendmsg(char *data, size_t len)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct net *net = &init_net;
	unsigned char *skbdata;

	// create and setup an sk_buff	
	skb = alloc_skb(sizeof(struct iphdr) + 4096, GFP_ATOMIC);
	skb_reserve(skb, sizeof(struct iphdr) + 1500);
	skbdata = skb_put(skb, len);
//	skb->csum = csum_and_copy_from_user(data, skbdata, len, 0, &err);
	memcpy(skbdata, data, len);

	// setup and add the ip header
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->daddr    = cse536_daddr;
	iph->saddr    = cse536_saddr;
	iph->protocol = IPPROTO_CSE536;	// my protocol number
	iph->id       = htons(1);
	iph->tot_len  = htons(skb->len);

	// get the route. this seems to be necessary, does not work without
	rt = ip_route_output(net, cse536_daddr, cse536_saddr, 0,0);	
	skb_dst_set(skb, &rt->dst);
	
	//printk("skb data: %s", skbdata);
	return ip_local_out(skb);
}
Example #17
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);
	return 0;
}
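The long comment above (Examples #17 and #19 share it) reduces to a two-step routing decision. A condensed, illustrative sketch of just that decision, using the same calls as the function but with the bridge-netfilter bookkeeping stripped out:
/* Condensed sketch of the decision described in the comment above.
 * Illustrative only: the real function also restores pkt_type, handles
 * encapsulation headers and re-invokes the netfilter hooks, and it only
 * falls back to the output lookup for -EHOSTUNREACH with forwarding
 * disabled. */
static bool dnat_target_is_bridged(struct net *net, struct sk_buff *skb,
				   struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;

	/* Step 1: ask the input path.  If it resolves and the output
	 * device is still the bridge, the DNAT target sits behind the
	 * same bridge and the frame can be bridged. */
	if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev) == 0)
		return skb_dst(skb)->dev == dev;

	/* Step 2: the input path refuses when forwarding is off, so fall
	 * back to an output lookup with a zero source address and accept
	 * only routes that point back out of the bridge device. */
	rt = ip_route_output(net, iph->daddr, 0, RT_TOS(iph->tos), 0);
	if (IS_ERR(rt))
		return false;
	if (rt->dst.dev == dev) {
		skb_dst_set(skb, &rt->dst);
		return true;
	}
	ip_rt_put(rt);
	return false;
}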
Example #18
static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr  *tiph = &tunnel->parms.iph;
	struct ipv6hdr *iph6 = skb->nh.ipv6h;
	u8     tos = tunnel->parms.iph.tos;
	struct rtable *rt;     			/* Route to the other host */
	struct net_device *tdev;			/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	int    max_headroom;			/* The extra header space needed */
	u32    dst = tiph->daddr;
	int    mtu;
	struct in6_addr *addr6;	
	int addr_type;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (skb->protocol != htons(ETH_P_IPV6))
		goto tx_error;

	if (!dst)
		dst = try_6to4(&iph6->daddr);

	if (!dst) {
		struct neighbour *neigh = NULL;

		if (skb->dst)
			neigh = skb->dst->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		addr6 = (struct in6_addr*)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY) {
			addr6 = &skb->nh.ipv6h->daddr;
			addr_type = ipv6_addr_type(addr6);
		}

		if (addr_type & IPV6_ADDR_COMPATv4)
			dst = addr6->s6_addr32[3];
		else
#ifdef CONFIG_IPV6_6TO4_NEXTHOP
		if (!(dst = try_6to4(addr6)))
#endif
			goto tx_error_icmp;
	}

	if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
		tunnel->stat.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	if (rt->rt_type != RTN_UNICAST) {
		tunnel->stat.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = rt->u.dst.pmtu - sizeof(struct iphdr);
	else
		mtu = skb->dst ? skb->dst->pmtu : dev->mtu;

	if (mtu < 68) {
		tunnel->stat.collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb->dst && mtu < skb->dst->pmtu) {
		struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
		if (mtu < rt6->u.dst.pmtu) {
			if (tunnel->parms.iph.daddr || rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				rt6->u.dst.pmtu = mtu;
			}
		}
	}
	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = (((tdev->hard_header_len+15)&~15)+sizeof(struct iphdr));

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
  			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		iph6 = skb->nh.ipv6h;
	}

	skb->h.raw = skb->nh.raw;
	skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph 			=	skb->nh.iph;
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr)>>2;
	if (mtu > IPV6_MIN_MTU)
		iph->frag_off	=	htons(IP_DF);
	else
		iph->frag_off	=	0;

	iph->protocol		=	IPPROTO_IPV6;
	iph->tos		=	INET_ECN_encapsulate(tos, ip6_get_dsfield(iph6));
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl	=	iph6->hop_limit;

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
Example #19
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
Example #20
static void send_unreach(struct sk_buff *skb_in, int code)
{
	struct iphdr *iph;
	struct udphdr *udph;
	struct icmphdr *icmph;
	struct sk_buff *nskb;
	u32 saddr;
	u8 tos;
	int hh_len, length;
	struct rtable *rt = (struct rtable*)skb_in->dst;
	unsigned char *data;

	if (!rt)
		return;

	/* FIXME: Use sysctl number. --RR */
	if (!xrlim_allow(&rt->u.dst, 1*HZ))
		return;

	iph = skb_in->nh.iph;

	/* No replies to physical multicast/broadcast */
	if (skb_in->pkt_type!=PACKET_HOST)
		return;

	/* Now check at the protocol level */
	if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST))
		return;

	/* Only reply to fragment 0. */
	if (iph->frag_off&htons(IP_OFFSET))
		return;

	/* if UDP checksum is set, verify it's correct */
	if (iph->protocol == IPPROTO_UDP
	    && skb_in->tail-(u8*)iph >= sizeof(struct udphdr)) {
		int datalen = skb_in->len - (iph->ihl<<2);
		udph = (struct udphdr *)((char *)iph + (iph->ihl<<2));
		if (udph->check
		    && csum_tcpudp_magic(iph->saddr, iph->daddr,
		                         datalen, IPPROTO_UDP,
		                         csum_partial((char *)udph, datalen,
		                                      0)) != 0)
			return;
	}
		    
	/* If we send an ICMP error to an ICMP error a mess would result.. */
	if (iph->protocol == IPPROTO_ICMP
	    && skb_in->tail-(u8*)iph >= sizeof(struct icmphdr)) {
		icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2));
		/* Between echo-reply (0) and timestamp (13),
		   everything except echo-request (8) is an error.
		   Also, anything greater than NR_ICMP_TYPES is
		   unknown, and hence should be treated as an error... */
		if ((icmph->type < ICMP_TIMESTAMP
		     && icmph->type != ICMP_ECHOREPLY
		     && icmph->type != ICMP_ECHO)
		    || icmph->type > NR_ICMP_TYPES)
			return;
	}

	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL))
		saddr = 0;

	tos = (iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL;

	if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	length = skb_in->len + sizeof(struct iphdr) + sizeof(struct icmphdr);

	if (length > rt->u.dst.pmtu)
		length = rt->u.dst.pmtu;
	if (length > 576)
		length = 576;

	hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;

	nskb = alloc_skb(hh_len+15+length, GFP_ATOMIC);
	if (!nskb) {
		ip_rt_put(rt);
		return;
	}

	nskb->priority = 0;
	nskb->dst = &rt->u.dst;
	skb_reserve(nskb, hh_len);

	/* Set up IP header */
	iph = nskb->nh.iph
		= (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	iph->version=4;
	iph->ihl=5;
	iph->tos=tos;
	iph->tot_len = htons(length);

	/* PMTU discovery never applies to ICMP packets. */
	iph->frag_off = 0;

	iph->ttl = MAXTTL;
	ip_select_ident(iph, &rt->u.dst, NULL);
	iph->protocol=IPPROTO_ICMP;
	iph->saddr=rt->rt_src;
	iph->daddr=rt->rt_dst;
	iph->check=0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	/* Set up ICMP header. */
	icmph = nskb->h.icmph
		= (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;	
	icmph->un.gateway = 0;
	icmph->checksum = 0;
	
	/* Copy as much of original packet as will fit */
	data = skb_put(nskb,
		       length - sizeof(struct iphdr) - sizeof(struct icmphdr));
	/* FIXME: won't work with nonlinear skbs --RR */
	memcpy(data, skb_in->nh.iph,
	       length - sizeof(struct iphdr) - sizeof(struct icmphdr));
	icmph->checksum = ip_compute_csum((unsigned char *)icmph,
					  length - sizeof(struct iphdr));

	nf_ct_attach(nskb, skb_in);

	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		ip_finish_output);
}	
Example #21
static void send(struct RTK_TRAP_profile *ptr, const void *ptr_data, uint32 data_len)
{
	uint8 *tmp;
	uint32 tos;
	Tsudphdr *udphdr;
	Tsiphdr *iphdr;
	struct sk_buff *skb;
	struct rtable *rt = NULL;
	int rst;
	//int i;
	//struct dst_entry *dst = skb->dst;
	//struct hh_cache *hh;
	//struct neighbour *n;
#ifdef SUPPORT_VOICE_QOS
	tos = ptr->tos;
#endif	
#ifdef SUPPORT_DSCP
	tos = rtp_tos;
#endif	


//	printk("enter send function\n");
	//printk("profile: ip_dst = %x, ip_src = %x\n", ptr->ip_dst_addr, ptr->ip_src_addr);
	/* ip_src_addr is destination address */
#ifdef CONFIG_RTK_VOIP_SRTP
	err_status_t stat = 0;
#ifdef FEATURE_COP3_PROFILE	  
	unsigned long flags;
	save_flags(flags); cli();
	ProfileEnterPoint(PROFILE_INDEX_TEMP);
#endif	
	/* apply srtp */
        if (ptr->applySRTP){
#ifndef AUDIOCODES_VOIP
        	extern int rtcp_sid_offset;
        	//if((ptr->udp_dst_port == 9001) || (ptr->udp_dst_port == 9003) || (ptr->udp_dst_port == 9005) || (ptr->udp_dst_port == 9007)){
        	if(ptr->s_id >= rtcp_sid_offset){
#else
		if(((ptr->s_id)%2) == 1 ){ // ACMW RTP sid = 2*CH, RTCP sid = 2*CH + 1
#endif
        		stat = srtp_protect_rtcp(ptr->tx_srtp_ctx, ptr_data, &data_len);
        	}
		else{
			stat = srtp_protect(ptr->tx_srtp_ctx, ptr_data, &data_len);
		}
	}

#ifdef FEATURE_COP3_PROFILE	  
	ProfileExitPoint(PROFILE_INDEX_TEMP);
	restore_flags(flags);
	ProfilePerDump(PROFILE_INDEX_TEMP, 1000);
#endif	
	if (stat) {
		printk("error: srtp protection failed with code %d\n", stat);
	    	return;
	}	
#endif	
	// I: .7960 (.6416)
	// D: .7806 (.2803)
	{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
	struct flowi key = { .oif = 0, .flags = FLOWI_FLAG_ANYSRC, .nl_u =  { .ip4_u = { .daddr = ptr->ip_src_addr,
                                                          .saddr = ptr->ip_dst_addr,
                                                          .tos = (uint32)(RT_TOS(tos)) }}};
	extern struct net init_net;
	if((rst = ip_route_output_key(&init_net, &rt, &key)))
#else
	if((rst = ip_route_output(&rt, ptr->ip_src_addr, ptr->ip_dst_addr,(uint32)(RT_TOS(tos)), 0)))
#endif
//	if(rst = ip_route_output(&rt, ptr->ip_src_addr, 0,(Tuint32)(RT_TOS(tos)), 0))
	{
		//printk("ip_route_output failed rst = %d\n", rst);
		//printk("**rt = %x\n", *rt);
		//printk("RTK_TRAP info: ip_dst_addr = %x, ip_src_addr = %x \n", ptr->ip_dst_addr,  ptr->ip_src_addr);
		printk("NR ");
		//printk("TX err: %x->%x\n", ptr->ip_dst_addr,  ptr->ip_src_addr);
		return;
	}
	}	// pkshih: avoid compiler warning 

	// I: .6438 (.4964)
	// D: .6290 (.1713)
	//skb = dev_alloc_skb(data_len + 20 + 8);
	skb = alloc_skb(data_len+4+16 + 20 + 8, GFP_ATOMIC);
	if (skb == NULL){
#if 1
		printk("%s-%s(): alloc_skb failed:(%d)\n", __FILE__, __FUNCTION__, tx_skb_cnt);
#else	
		printk("send skb_alloc return NULL. Drop it.\n");
		printk("final [%d] ", tx_skb_cnt);
		cli();
		while(1);
#endif		
		return ;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
	skb->sk = (void*)1; // for NAT 
#endif

#if 0
	skb_reserve(skb,4);
	if(skb->data - skb->head >=4){
		tx_skb_cnt+=1;
		(*(int*)(skb->head)) = 0x81868711;
		//printk("[%d] ", tx_skb_cnt);
#if 0		
		if(tx_skb_cnt > 127)
		  printk("[%d] ", tx_skb_cnt);

		if(tx_skb_cnt ==500)
		{
			printk("while(1) due to tx_skb_cnt > 500\n");
			while(1);
		}
#endif		
	}
	
#endif
	skb_reserve(skb,16);

	//printk("skb_put before\n");
	tmp = skb_put(skb, data_len + 20 + 8); //tmp = skb->data
	//printk("******skb_put ok\n");

	iphdr = (Tsiphdr *)tmp;
	udphdr = (Tsudphdr *)(tmp+20);

/* ip */
	iphdr->version = 4;
	iphdr->ihl = 5;
	iphdr->tos = tos;	// TOS
	iphdr->tot_len = htons(data_len + 8 + 20);
	iphdr->id = 0;
	iphdr->frag_off = 0;
	iphdr->ttl = 0x40;
	iphdr->protocol = 0x11;
	iphdr->check = 0;
	iphdr->saddr = ptr->ip_dst_addr;
	iphdr->daddr = ptr->ip_src_addr;
	iphdr->check = ip_fast_csum((uint8 *)(iphdr), 5);

/* udp */
	udphdr->source = ptr->udp_dst_port;
	udphdr->dest = ptr->udp_src_port;
	udphdr->len = htons(data_len + 8);
	udphdr->check = 0;

/* rtp */	
	memcpy(tmp+28, ptr_data, data_len);

#ifdef USE_DST_OUTPUT
//shlee, for 2.6.32
  #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
	skb_dst_set(skb, dst_clone(&rt->u.dst));
	skb->dev = skb_dst(skb)->dev;
  #else	
	skb->dst = dst_clone(&rt->u.dst);
	skb->dev = (skb->dst)->dev;
  #endif

  #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21))
	skb_reset_network_header(skb);
  #else
	skb->nh.iph = (struct iphdr*)(skb->data);
  #endif
	// Linux default qdisc pfifo has 3 queue (0,1,2). q0 is the highest priority,
	// and skb->priority 6 and 7 will be put into queue 0.
	skb->priority = 7;
	//skb->dst->output(skb);
  #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
#define NF_IP_LOCAL_OUT		3
  #endif
	// I: 4.6239 (3.4241)
	// D: 4.6088 (1.2092)
//shlee, for 2.6.32
  #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                   skb_dst(skb)->output);	
  #else 
	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                   skb->dst->output);	
  #endif
     #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
	ip_rt_put(rt);
     #endif
        return;
#else
	skb->dst = &rt->u.dst;
	skb->dev = (skb->dst)->dev;

/* ethernet */
		dst = skb->dst;
		hh = (skb->dst)->hh;
  #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21))
		skb_reset_network_header(skb);
  #else
		skb->nh.raw = skb->data;
  #endif
/*
	        printk("neigh_lookup start\n");
		printk("dst->neighbour->tbl = %x\n",(dst->neighbour)->tbl);
		n = neigh_lookup(dst->neighbour->tbl, ptr->ip_src_addr, skb->dev);
		printk("n = %x\n", n);
*/

#if 1
		//printk("hh = %x\n", hh);
        	if (hh) {
        		read_lock_bh(&hh->hh_lock);
			//for(i = 0;i<4;i++)
			//printk("hh->hh_data[%d] = %08x\n", i,  hh->hh_data[i] );
			//printk("hh->hh_len =%d\n", hh->hh_len);
          		memcpy(skb->data - 16, hh->hh_data, 16);
        		read_unlock_bh(&hh->hh_lock);
        	        skb_push(skb, hh->hh_len);
			//printk("skb->dev = %x\n", skb->dev);
			//printk("skb->dev->name = %s\n", skb->dev->name);
			//printk("hh_output = %x\n", hh->hh_output);
        		//hh->hh_output(skb);
			skb->dev->hard_start_xmit(skb, skb->dev);
			//printk("hh_output\n");
        		//return hh->hh_output(skb);
        	} else if (dst->neighbour) {
			//printk("arp\n");
#if 0
			printk("dst->neighbour->output = %x\n",dst->neighbour->output);
			printk("skb->len = %x\n",skb->len);
			printk("skb->dev = %x\n", skb->dev);
			printk("skb->dev->name = %s\n", skb->dev->name);
#endif
        		dst->neighbour->output(skb);
			//printk("dst->neighbour->output\n");
        		//return dst->neighbour->output(skb);
		}
#endif
#if 0
	printk("(skb->dst)->hh = %x\n", (skb->dst)->hh);
	memcpy(tmp - 2, ((skb->dst)->hh)->hh_data, 16);
	dev_queue_xmit (skb);
#endif
	//printk("finidsh send\n");
	return;
#endif  /* USE_DST_OUTPUT */
}

#if defined (AUDIOCODES_VOIP)
void rtp_send_aux(uint32 ip_src_addr, uint32 ip_dst_addr, uint16 udp_src_port, uint16 udp_dst_port, void *ptr_data, uint32 data_len)
{
	struct RTK_TRAP_profile rtkTrapPrf;

	rtkTrapPrf.ip_dst_addr = ip_dst_addr;
	rtkTrapPrf.ip_src_addr = ip_src_addr;
	rtkTrapPrf.udp_dst_port = udp_dst_port;
	rtkTrapPrf.udp_src_port = udp_src_port;
	rtkTrapPrf.rtk_trap_callback = NULL;
	rtkTrapPrf.next = NULL;

	send(&rtkTrapPrf, ptr_data, data_len);
}
#endif
//====================================================================================//

#ifdef RTP_SNED_TASKLET

void rtp_send_2(unsigned long *dummy)
{	
	unsigned char chid;
	unsigned long flags;
        //unsigned char rtp_w_now[MAX_VOIP_CH_NUM], pload_w_now[MAX_VOIP_CH_NUM];
	//static unsigned char cnt=0;
	
	for (chid=0; chid < DSP_CH_NUM; chid++)
	{
		//rtp_w_now[chid]= rtp_w[chid];
		//pload_w_now[chid]=pload_w[chid];
		
		//printk("%s-%d\n",__FUNCTION__, __LINE__);	
		//if ( (rtp_r[chid] == rtp_w_now[chid]) && (pload_r[chid] == pload_w_now[chid]) )
		//	printk("RTP SNED & Payload FIFO Empty\n");
		//else
		//{	
			//while (!((rtp_r[chid] == rtp_w_now[chid]) && ( pload_r[chid] == pload_w_now[chid])))
			while (!((rtp_r[chid] == rtp_w[chid]) && ( pload_r[chid] == pload_w[chid])))
			{
				//printk("%s-%d\n",__FUNCTION__, __LINE__);	
				send(&Rtp_send[chid][rtp_r[chid]], &Rtp_fifo[chid][pload_r[chid] * 512], pload_len);
				//printk("%s-%d\n",__FUNCTION__, __LINE__);
				save_flags(flags); cli();	
				rtp_r[chid] = (rtp_r[chid] + 1)%FIFO_NUM;
				pload_r[chid] = (pload_r[chid] + 1)%FIFO_NUM;
				restore_flags(flags);
				
				if (rtp_r[chid]!=pload_r[chid])
					printk("Error!! rtp_r!=pload_r\n");
			}
			//if (cnt >= 2)
				//printk("%d ", cnt);
			//cnt=0;
		//}
	}
}
Example #22
/* Paul: This seems to be unused dead code */
enum ipsec_xmit_value
ipsec_mast_send(struct ipsec_xmit_state*ixs)
{
	/* new route/dst cache code from James Morris */
	ixs->skb->dev = ixs->physdev;
	/*skb_orphan(ixs->skb);*/
	if((ixs->error = ip_route_output(&ixs->route,
				    ixs->skb->nh.iph->daddr,
				    ixs->pass ? 0 : ixs->skb->nh.iph->saddr,
				    RT_TOS(ixs->skb->nh.iph->tos),
				    ixs->physdev->ifindex /* rgb: should this be 0? */))) {
		ixs->stats->tx_errors++;
		KLIPS_PRINT(debug_mast & DB_MAST_XMIT,
			    "klips_debug:ipsec_mast_send: "
			    "ip_route_output failed with error code %d, dropped\n",
			    ixs->error);
		return IPSEC_XMIT_ROUTEERR;
	}
	if(ixs->dev == ixs->route->u.dst.dev) {
		ip_rt_put(ixs->route);
		/* This is recursion, drop it. */
		ixs->stats->tx_errors++;
		KLIPS_PRINT(debug_mast & DB_MAST_XMIT,
			    "klips_debug:ipsec_mast_send: "
			    "suspect recursion, dev=rt->u.dst.dev=%s, dropped\n",
			    ixs->dev->name);
		return IPSEC_XMIT_RECURSDETECT;
	}
	dst_release(skb_dst(ixs->skb));
	skb_dst_set(ixs->skb, &ixs->route->u.dst);
	ixs->stats->tx_bytes += ixs->skb->len;
	if(ixs->skb->len < ixs->skb->nh.raw - ixs->skb->data) {
		ixs->stats->tx_errors++;
		printk(KERN_WARNING
		       "klips_error:ipsec_mast_send: "
		       "tried to __skb_pull nh-data=%ld, %d available.  This should never happen, please report.\n",
		       (unsigned long)(ixs->skb->nh.raw - ixs->skb->data),
		       ixs->skb->len);
		return IPSEC_XMIT_PUSHPULLERR;
	}
	__skb_pull(ixs->skb, ixs->skb->nh.raw - ixs->skb->data);

	ipsec_nf_reset(ixs->skb);

	KLIPS_PRINT(debug_mast & DB_MAST_XMIT,
		    "klips_debug:ipsec_mast_send: "
		    "...done, calling ip_send() on device:%s\n",
		    ixs->skb->dev ? ixs->skb->dev->name : "NULL");
	KLIPS_IP_PRINT(debug_mast & DB_MAST_XMIT, ixs->skb->nh.iph);
	{
		int err;

		err = NF_HOOK(PF_INET, OSW_NF_INET_LOCAL_OUT, ixs->skb, NULL, ixs->route->u.dst.dev,
			      ipsec_mast_xmit2);
		if(err != NET_XMIT_SUCCESS && err != NET_XMIT_CN) {
			if(net_ratelimit())
				printk(KERN_ERR
				       "klips_error:ipsec_mast_send: "
				       "ip_send() failed, err=%d\n", 
				       -err);
			ixs->stats->tx_errors++;
			ixs->stats->tx_aborted_errors++;
			ixs->skb = NULL;
			return IPSEC_XMIT_IPSENDFAILURE;
		}
	}
	ixs->stats->tx_packets++;
        ixs->skb = NULL;

        return IPSEC_XMIT_OK;
}
Example #23
static int raw_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
	struct ipcm_cookie ipc;
	struct rawfakehdr rfh;
	struct rtable *rt = NULL;
	int free = 0;
	u32 daddr;
	u8  tos;
	int err;

	/* This check is ONLY to check for arithmetic overflow
	   on integer(!) len. Not more! Real check will be made
	   in ip_build_xmit --ANK

	   BTW socket.c -> af_*.c -> ... make multiple
	   invalid conversions size_t -> int. We MUST repair it f.e.
	   by replacing all of them with size_t and revise all
	   the places sort of len += sizeof(struct iphdr)
	   If len was ULONG_MAX-10 it would be catastrophe  --ANK
	 */

	err = -EMSGSIZE;
	if (len < 0 || len > 0xFFFF)
		goto out;

	/*
	 *	Check the flags.
	 */

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message */
		goto out;               /* compatibility */
			 
	/*
	 *	Get and verify the address. 
	 */

	if (msg->msg_namelen) {
		struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(*usin))
			goto out;
		if (usin->sin_family != AF_INET) {
			static int complained;
			if (!complained++)
				printk(KERN_INFO "%s forgot to set AF_INET in "
						 "raw sendmsg. Fix it!\n",
						 current->comm);
			err = -EINVAL;
			if (usin->sin_family)
				goto out;
		}
		daddr = usin->sin_addr.s_addr;
		/* ANK: I did not forget to get protocol from port field.
		 * I just do not know, who uses this weirdness.
		 * IP_HDRINCL is much more convenient.
		 */
	} else {
		err = -EDESTADDRREQ;
		if (sk->state != TCP_ESTABLISHED) 
			goto out;
		daddr = sk->daddr;
	}

	ipc.addr = sk->saddr;
	ipc.opt = NULL;
	ipc.oif = sk->bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			goto out;
		if (ipc.opt)
			free = 1;
	}

	rfh.saddr = ipc.addr;
	ipc.addr = daddr;

	if (!ipc.opt)
		ipc.opt = sk->protinfo.af_inet.opt;

	if (ipc.opt) {
		err = -EINVAL;
		/* Linux does not mangle headers on raw sockets,
		 * so that IP options + IP_HDRINCL is non-sense.
		 */
		if (sk->protinfo.af_inet.hdrincl)
			goto done;
		if (ipc.opt->srr) {
			if (!daddr)
				goto done;
			daddr = ipc.opt->faddr;
		}
	}
	tos = RT_TOS(sk->protinfo.af_inet.tos) | sk->localroute;
	if (msg->msg_flags & MSG_DONTROUTE)
		tos |= RTO_ONLINK;

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = sk->protinfo.af_inet.mc_index;
		if (!rfh.saddr)
			rfh.saddr = sk->protinfo.af_inet.mc_addr;
	}

	err = ip_route_output(&rt, daddr, rfh.saddr, tos, ipc.oif);

	if (err)
		goto done;

	err = -EACCES;
	if (rt->rt_flags & RTCF_BROADCAST && !sk->broadcast)
		goto done;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	rfh.iov		= msg->msg_iov;
	rfh.saddr	= rt->rt_src;
	rfh.dst		= &rt->u.dst;
	if (!ipc.addr)
		ipc.addr = rt->rt_dst;
	err = ip_build_xmit(sk, sk->protinfo.af_inet.hdrincl ? raw_getrawfrag :
		       	    raw_getfrag, &rfh, len, &ipc, rt, msg->msg_flags);

done:
	if (free)
		kfree(ipc.opt);
	ip_rt_put(rt);

out:	return err < 0 ? err : len;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
Example #24
void ipip_err(struct sk_buff *skb, unsigned char *dp, int len)
{
#ifndef I_WISH_WORLD_WERE_PERFECT

/* It is not :-( All the routers (except for Linux) return only
   8 bytes of packet payload. It means, that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	struct iphdr *iph = (struct iphdr*)dp;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct ip_tunnel *t;

	if (len < sizeof(struct iphdr))
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	t = ipip_tunnel_lookup(iph->daddr, iph->saddr);
	if (t == NULL || t->parms.iph.daddr == 0)
		return;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
	return;
#else
	struct iphdr *iph = (struct iphdr*)dp;
	int hlen = iph->ihl<<2;
	struct iphdr *eiph;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	int rel_type = 0;
	int rel_code = 0;
	int rel_info = 0;
	struct sk_buff *skb2;
	struct rtable *rt;

	if (len < hlen + sizeof(struct iphdr))
		return;
	eiph = (struct iphdr*)(dp + hlen);

	switch (type) {
	default:
		return;
	case ICMP_PARAMETERPROB:
		if (skb->h.icmph->un.gateway < hlen)
			return;

		/* So... This guy found something strange INSIDE encapsulated
		   packet. Well, he is fool, but what can we do ?
		 */
		rel_type = ICMP_PARAMETERPROB;
		rel_info = skb->h.icmph->un.gateway - hlen;
		break;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* And it is the only really necessary thing :-) */
			rel_info = ntohs(skb->h.icmph->un.frag.mtu);
			if (rel_info < hlen+68)
				return;
			rel_info -= hlen;
			/* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
			if (rel_info > ntohs(eiph->tot_len))
				return;
			break;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe, it is just ether pollution. --ANK
			 */
			rel_type = ICMP_DEST_UNREACH;
			rel_code = ICMP_HOST_UNREACH;
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	/* Prepare fake skb to feed it to icmp_send */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL)
		return;
	dst_release(skb2->dst);
	skb2->dst = NULL;
	skb_pull(skb2, skb->data - (u8*)eiph);
	skb2->nh.raw = skb2->data;

	/* Try to guess incoming interface */
	if (ip_route_output(&rt, eiph->saddr, 0, RT_TOS(eiph->tos), 0)) {
		kfree_skb(skb2);
		return;
	}
	skb2->dev = rt->u.dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags&RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		if (ip_route_output(&rt, eiph->daddr, eiph->saddr, eiph->tos, 0) ||
		    rt->u.dst.dev->type != ARPHRD_IPGRE) {
			ip_rt_put(rt);
			kfree_skb(skb2);
			return;
		}
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
		    skb2->dst->dev->type != ARPHRD_IPGRE) {
			kfree_skb(skb2);
			return;
		}
	}

	/* change mtu on this route */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		if (rel_info > skb2->dst->pmtu) {
			kfree_skb(skb2);
			return;
		}
		skb2->dst->pmtu = rel_info;
		rel_info = htonl(rel_info);
	} else if (type == ICMP_TIME_EXCEEDED) {
		struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv;
		if (t->parms.iph.ttl) {
			rel_type = ICMP_DEST_UNREACH;
			rel_code = ICMP_HOST_UNREACH;
		}
	}

	icmp_send(skb2, rel_type, rel_code, rel_info);
	kfree_skb(skb2);
	return;
#endif
}
Example #25
static int ipip_tunnel_xmit(struct sk_buff *skb, struct device *dev)
{
	struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr  *tiph = &tunnel->parms.iph;
	u8     tos = tunnel->parms.iph.tos;
	u16    df = tiph->frag_off;
	struct rtable *rt;     			/* Route to the other host */
	struct device *tdev;			/* Device to other host */
	struct iphdr  *old_iph = skb->nh.iph;
	struct iphdr  *iph;			/* Our new IP header */
	int    max_headroom;			/* The extra header space needed */
	u32    dst = tiph->daddr;
	int    mtu;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (skb->protocol != __constant_htons(ETH_P_IP))
		goto tx_error;

	if (tos&1)
		tos = old_iph->tos;

	if (!dst) {
		/* NBMA tunnel */
		if ((rt = (struct rtable*)skb->dst) == NULL) {
			tunnel->stat.tx_fifo_errors++;
			goto tx_error;
		}
		if ((dst = rt->rt_gateway) == 0)
			goto tx_error_icmp;
	}

	if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
		tunnel->stat.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	mtu = rt->u.dst.pmtu - sizeof(struct iphdr);
	if (mtu < 68) {
		tunnel->stat.collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (skb->dst && mtu < skb->dst->pmtu)
		skb->dst->pmtu = mtu;

	df |= (old_iph->frag_off&__constant_htons(IP_DF));

	if ((old_iph->frag_off&__constant_htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	skb->h.raw = skb->nh.raw;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = (((tdev->hard_header_len+15)&~15)+sizeof(struct iphdr));

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
  			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph 			=	skb->nh.iph;
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr)>>2;
	iph->frag_off		=	df;
	iph->protocol		=	IPPROTO_IPIP;
	iph->tos		=	tos;
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl	=	old_iph->ttl;

	iph->tot_len		=	htons(skb->len);
	iph->id			=	htons(ip_id_count++);
	ip_send_check(iph);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	ip_send(skb);
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
Example #26
static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr  *tiph = &tunnel->parms.iph;
	u8     tos = tunnel->parms.iph.tos;
	u16    df = tiph->frag_off;
	struct rtable *rt;     			/* Route to the other host */
	struct net_device *tdev;			/* Device to other host */
	struct iphdr  *old_iph = skb->nh.iph;
#ifdef CONFIG_NET_IPIP_IPV6
	struct ipv6hdr *iph6 = skb->nh.ipv6h;
#endif
	struct iphdr  *iph;			/* Our new IP header */
	int    max_headroom;			/* The extra header space needed */
	u32    dst = tiph->daddr;
	int    mtu;
	u8 protocol = 0; 

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		protocol = IPPROTO_IPIP;
		break;
#ifdef CONFIG_NET_IPIP_IPV6
	case __constant_htons(ETH_P_IPV6):
		protocol = IPPROTO_IPV6;
		break;
#endif
	}

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	switch(skb->protocol) {
	case __constant_htons(ETH_P_IP):
		if (tunnel->parms.iph.protocol &&
		    tunnel->parms.iph.protocol != IPPROTO_IPIP)
			goto tx_error;
		if (tos&1)
			tos = old_iph->tos;
		break;
#ifdef CONFIG_NET_IPIP_IPV6
	case __constant_htons(ETH_P_IPV6):
		if (tunnel->parms.iph.protocol &&
		    tunnel->parms.iph.protocol != IPPROTO_IPV6)
			goto tx_error;
		break;
#endif
	default:
		goto tx_error;
	}

	if (!dst) {
		switch(skb->protocol){
		case __constant_htons(ETH_P_IP):
			/* NBMA tunnel */
			if ((rt = (struct rtable*)skb->dst) == NULL) {
				tunnel->stat.tx_fifo_errors++;
				goto tx_error;
			}
			dst = rt->rt_gateway;
			break;
#ifdef CONFIG_NET_IPIP_IPV6
		case __constant_htons(ETH_P_IPV6):
		    {
			struct in6_addr *addr6 = &iph6->daddr;
			if (addr6->s6_addr16[0] == htons(0x2002)) {
				memcpy(&dst, &addr6->s6_addr16[1], 4);
			} else {
				/* dst is zero */
				struct neighbour *neigh = NULL;
				if (skb->dst)
					neigh = skb->dst->neighbour;
				if (neigh == NULL) {
					printk(KERN_DEBUG "tunl: nexthop == NULL\n");
					goto tx_error;
				}
				addr6 = (struct in6_addr*)&neigh->primary_key;
				if (IN6_IS_ADDR_UNSPECIFIED(addr6))
					addr6 = &skb->nh.ipv6h->daddr;
				if (IN6_IS_ADDR_V4COMPAT(addr6))
					dst = addr6->s6_addr32[3];
#ifdef CONFIG_IPV6_6TO4_NEXTHOP
				else if (addr6->s6_addr16[0] == htons(0x2002)) 
					memcpy(&dst, &addr6->s6_addr16[1], 4);
#endif
				else
					goto tx_error_icmp;
			}
			break;
		    }
#endif
		}
		if (!dst)
			goto tx_error_icmp;
	}

	if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
		tunnel->stat.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = rt->u.dst.pmtu - sizeof(struct iphdr);
	else
		mtu = skb->dst ? skb->dst->pmtu : dev->mtu;

	if (mtu < 68) {
		tunnel->stat.collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}

	switch(skb->protocol){
	case __constant_htons(ETH_P_IP):
		if (skb->dst && mtu < skb->dst->pmtu)
			skb->dst->pmtu = mtu;

		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
		break;

#ifdef CONFIG_NET_IPIP_IPV6
	case __constant_htons(ETH_P_IPV6):
#if 0
		if (mtu < IPV6_MIN_MTU) {
			/* XXX: too small; we should fragment this packet? */
			tunnel->stat.tx_carrier_errors++;
			goto tx_error_icmp;
		}
#endif
		if (skb->len > mtu && mtu > IPV6_MIN_MTU) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
			ip_rt_put(rt);
			goto tx_error;
		}
		df = mtu > IPV6_MIN_MTU ? htons(IP_DF) : 0;
		break;
#endif
	}
	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = (((tdev->hard_header_len+15)&~15)+sizeof(struct iphdr));
	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
  			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = skb->nh.iph;
	}

	skb->h.raw = skb->nh.raw;
	skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph 			=	skb->nh.iph;
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr)>>2;
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;

	iph->ttl		=	tiph->ttl;
	iph->frag_off		=	df;

	switch(skb->protocol){
	case __constant_htons(ETH_P_IP):
		iph->protocol	=	protocol;
		iph->tos	=	INET_ECN_encapsulate(tos, old_iph->tos);
		if (iph->ttl == 0)
			iph->ttl =	old_iph->ttl;
		break;
#ifdef CONFIG_NET_IPIP_IPV6
	case __constant_htons(ETH_P_IPV6):
		iph->protocol	=	protocol;
		iph->tos	=	INET_ECN_encapsulate(tos, ip6_get_dsfield(iph6));
		if (iph->ttl == 0)
			iph->ttl =	iph6->hop_limit;
		break;
#endif
	}

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
Example #27
/* Get route to destination or remote server */
static struct rtable *
__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                   __be32 daddr, u32 rtos, int rt_mode)
{
    struct net *net = dev_net(skb_dst(skb)->dev);
    struct rtable *rt;			/* Route to the other host */
    struct rtable *ort;			/* Original route */
    int local;

    if (dest) {
        spin_lock(&dest->dst_lock);
        if (!(rt = (struct rtable *)
                   __ip_vs_dst_check(dest, rtos))) {
            rt = ip_route_output(net, dest->addr.ip, 0, rtos, 0);
            if (IS_ERR(rt)) {
                spin_unlock(&dest->dst_lock);
                IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
                             &dest->addr.ip);
                return NULL;
            }
            __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
            IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
                      &dest->addr.ip,
                      atomic_read(&rt->dst.__refcnt), rtos);
        }
        spin_unlock(&dest->dst_lock);
    } else {
        rt = ip_route_output(net, daddr, 0, rtos, 0);
        if (IS_ERR(rt)) {
            IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
                         &daddr);
            return NULL;
        }
    }

    local = rt->rt_flags & RTCF_LOCAL;
    if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
            rt_mode)) {
        IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
                     (rt->rt_flags & RTCF_LOCAL) ?
                     "local":"non-local", &rt->rt_dst);
        ip_rt_put(rt);
        return NULL;
    }
    if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
            !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
        IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
                     "requires NAT method, dest: %pI4\n",
                     &ip_hdr(skb)->daddr, &rt->rt_dst);
        ip_rt_put(rt);
        return NULL;
    }
    if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
        IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
                     "to non-local address, dest: %pI4\n",
                     &ip_hdr(skb)->saddr, &rt->rt_dst);
        ip_rt_put(rt);
        return NULL;
    }

    return rt;
}
Example #28
void icmp_send(struct sk_buff *skb_in, int type, int code, unsigned long info)
{
	struct iphdr *iph;
	struct icmphdr *icmph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = (struct rtable*)skb_in->dst;
	struct ipcm_cookie ipc;
	u32 saddr;
	u8  tos;
	
	/*
	 *	Find the original header
	 */
	 
	iph = skb_in->nh.iph;
	
	/*
	 *	No replies to physical multicast/broadcast
	 */
	 
	if (skb_in->pkt_type!=PACKET_HOST)
		return;
		
	/*
	 *	Now check at the protocol level
	 */
	if (!rt) {
		if (sysctl_ip_always_defrag == 0 &&
		    net_ratelimit())
			printk(KERN_DEBUG "icmp_send: destinationless packet\n");
		return;
	}
	if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST))
		return;
	 
		
	/*
	 *	Only reply to fragment 0. We byte re-order the constant
	 *	mask for efficiency.
	 */
	 
	if (iph->frag_off&htons(IP_OFFSET))
		return;
		
	/* 
	 *	If we send an ICMP error to an ICMP error a mess would result..
	 */
	 
	if (icmp_pointers[type].error) {
		/*
		 *	We are an error, check if we are replying to an ICMP error
		 */
		 
		if (iph->protocol==IPPROTO_ICMP) {
			icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2));
			/*
			 *	Assume any unknown ICMP type is an error. This isn't
			 *	specified by the RFC, but think about it..
			 */
			if (icmph->type>NR_ICMP_TYPES || icmp_pointers[icmph->type].error)
				return;
		}
	}


	/*
	 *	Construct source address and options.
	 */

#ifdef CONFIG_IP_ROUTE_NAT	
	/*
	 *	Restore original addresses if packet has been translated.
	 */
	if (rt->rt_flags&RTCF_NAT && IPCB(skb_in)->flags&IPSKB_TRANSLATED) {
		iph->daddr = rt->key.dst;
		iph->saddr = rt->key.src;
	}
#endif
#ifdef CONFIG_IP_MASQUERADE
	if (type==ICMP_DEST_UNREACH && IPCB(skb_in)->flags&IPSKB_MASQUERADED) {
			ip_fw_unmasq_icmp(skb_in);
	}
#endif

	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL))
		saddr = 0;

	tos = icmp_pointers[type].error ?
		((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) :
			iph->tos;

	/* XXX: use a more aggressive expire for routes created by 
	 * this call (not longer than the rate limit timeout). 
	 * It could be also worthwhile to not put them into ipv4
	 * fast routing cache at first. Otherwise an attacker can
	 * grow the routing table.
	 */
	if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
		return;
	
	if (ip_options_echo(&icmp_param.replyopts, skb_in)) 
		goto ende;


	/*
	 *	Prepare data for ICMP header.
	 */

	icmp_param.icmph.type=type;
	icmp_param.icmph.code=code;
	icmp_param.icmph.un.gateway = info;
	icmp_param.icmph.checksum=0;
	icmp_param.csum=0;
	icmp_param.data_ptr=iph;
	icmp_out_count(icmp_param.icmph.type);
	icmp_socket->sk->ip_tos = tos;
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts;
	if (icmp_param.replyopts.srr) {
		ip_rt_put(rt);
		if (ip_route_output(&rt, icmp_param.replyopts.faddr, saddr, RT_TOS(tos), 0))
			return;
	}

	if (!icmpv4_xrlim_allow(rt, type, code))
		goto ende;

	/* RFC says return as much as we can without exceeding 576 bytes. */

	room = rt->u.dst.pmtu;
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
	room -= sizeof(struct icmphdr);

	icmp_param.data_len=(iph->ihl<<2)+skb_in->len;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	
	ip_build_xmit(icmp_socket->sk, icmp_glue_bits, &icmp_param, 
		icmp_param.data_len+sizeof(struct icmphdr),
		&ipc, rt, MSG_DONTWAIT);

ende:
	ip_rt_put(rt);
}
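Taken together, the examples cover both generations of this interface: a legacy int ip_route_output(&rt, daddr, saddr, tos, oif) that fills in a struct rtable * and returns an error code, and a newer variant that returns the route (or an ERR_PTR) directly. A rough side-by-side sketch of the two calling conventions follows; exact signatures depend on the kernel version, so treat it as an illustration rather than a reference.
/* Sketch of the two calling conventions seen above. */
static int route_lookup_sketch(struct net *net, __be32 daddr, __be32 saddr)
{
	struct rtable *rt;

	/* Newer kernels (e.g. Examples #1, #5, #7, #9-#12, #17, #19, #27):
	 * the route is the return value and errors come back as ERR_PTR. */
	rt = ip_route_output(net, daddr, saddr, 0, 0);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	ip_rt_put(rt);

	/* Older kernels (e.g. Examples #2-#4, #8, #18, #20, #23-#26, #28)
	 * used an output parameter and an integer error code instead:
	 *
	 *	struct rtable *rt;
	 *	int err = ip_route_output(&rt, daddr, saddr, tos, oif);
	 *	if (err)
	 *		return err;
	 *	...
	 *	ip_rt_put(rt);
	 *
	 * In both generations the caller owns a reference on the route and
	 * must release it with ip_rt_put(), or hand it to the skb via
	 * skb_dst_set() / skb->dst as several of the examples do. */
	return 0;
}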