Example no. 1
0
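/*
 *	Build an IGMP message of the given type for the group address and
 *	queue it for transmission on the given device.
 */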
static void igmp_send_report(struct device *dev, unsigned long address, int type)
{
	struct sk_buff *skb=alloc_skb(MAX_IGMP_SIZE, GFP_ATOMIC);
	int tmp;
	struct igmphdr *ih;

	if(skb==NULL)
		return;
	if (type != IGMP_HOST_LEAVE_MESSAGE)
		tmp=ip_build_header(skb, dev->pa_addr, address, &dev, IPPROTO_IGMP, NULL,
			28, 0, 1, NULL);
	else
		tmp=ip_build_header(skb, dev->pa_addr, IGMP_ALL_ROUTER, &dev, IPPROTO_IGMP, NULL,
			28, 0, 1, NULL);
	if(tmp<0)
	{
		kfree_skb(skb, FREE_WRITE);
		return;
	}
	ih=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
	ih->type=type;
	ih->code=0;
	ih->csum=0;
	ih->group=address;
	ih->csum=ip_compute_csum((void *)ih,sizeof(struct igmphdr));	/* Checksum fill */
	ip_queue_xmit(NULL,dev,skb,1);
}
Example no. 2
0
/* sends an icmp message in response to a packet. */
void
icmp_reply (struct sk_buff *skb_in,  int type, int code, struct device *dev)
{
   struct sk_buff *skb;
   struct ip_header *iph;
   int offset;
   struct icmp_header *icmph;

   int len;
   /* get some memory for the reply. */
   len = sizeof (*skb) + 8 /* amount of header to return. */ +
         sizeof (struct icmp_header) +
	 64 /* enough for an ip header. */ +
	 dev->hard_header_len;
	   
   skb = malloc (len);
   if (skb == NULL) return;

   skb->mem_addr = skb;
   skb->mem_len = len;

   len -= sizeof (*skb);

   /* find the ip header. */
   iph = (struct ip_header *)(skb_in+1);
   iph = (struct ip_header *)((unsigned char *)iph + dev->hard_header_len);

   /* Build Layer 2-3 headers for message back to source */
   offset = ip_build_header( skb, iph->daddr, iph->saddr,
			    &dev, IP_ICMP, NULL, len );

   if (offset < 0)
     {
	skb->sk = NULL;
	free_skb (skb, FREE_READ);
	return;
     }

   /* Readjust length according to actual IP header size */
   skb->len = offset + sizeof (struct icmp_header) + 8;
   
   icmph = (struct icmp_header *)((unsigned char *)(skb+1) + offset);
   icmph->type = type;
   icmph->code = code;
   icmph->checksum = 0; /* we don't need to compute this. */
   icmph->un.gateway = 0; /* might as well 0 it. */
   memcpy (icmph+1, iph+1, 8);
   /* send it and free it. */
   ip_queue_xmit (NULL, dev, skb, 1);
   
}
Example no. 3
0
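/*
 *	Loop a copy of an outgoing IP datagram back through the loopback
 *	device by building a fresh sk_buff and queueing it for transmission.
 */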
static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
{
	struct device *dev=&loopback_dev;
	int len=ntohs(skb->ip_hdr->tot_len);
	struct sk_buff *newskb=dev_alloc_skb(len+dev->hard_header_len+15);
	
	if(newskb==NULL)
		return;
		
	newskb->link3=NULL;
	newskb->sk=NULL;
	newskb->dev=dev;
	newskb->saddr=skb->saddr;
	newskb->daddr=skb->daddr;
	newskb->raddr=skb->raddr;
	newskb->free=1;
	newskb->lock=0;
	newskb->users=0;
	newskb->pkt_type=skb->pkt_type;
	
	/*
	 *	Put a MAC header on the packet
	 */
	ip_send(NULL,newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr);
	/*
	 *	Add the rest of the data space.	
	 */
	newskb->ip_hdr=(struct iphdr *)skb_put(newskb, len);
	memcpy(newskb->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));

	/*
	 *	Copy the data
	 */
	memcpy(newskb->ip_hdr,skb->ip_hdr,len);

	/* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
		
	/*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
	ip_queue_xmit(NULL, dev, newskb, 2);
}
Example no. 4
0
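/*
 *	Send an IGMP message of the given type for the group address on the
 *	given device.
 */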
static void igmp_send_report(struct device *dev, unsigned long address, int type)
{
	struct sk_buff *skb=alloc_skb(MAX_IGMP_SIZE, GFP_ATOMIC);
	int tmp;
	struct igmphdr *igh;
	
	if(skb==NULL)
		return;
	tmp=ip_build_header(skb, INADDR_ANY, address, &dev, IPPROTO_IGMP, NULL,
				skb->mem_len, 0, 1);
	if(tmp<0)
	{
		kfree_skb(skb, FREE_WRITE);
		return;
	}
	igh=(struct igmphdr *)(skb->data+tmp);
	skb->len=tmp+sizeof(*igh);
	igh->csum=0;
	igh->unused=0;
	igh->type=type;
	igh->group=address;
	igh->csum=ip_compute_csum((void *)igh,sizeof(*igh));
	ip_queue_xmit(NULL,dev,skb,1);
}
Example no. 5
0
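/*
 * Handle an incoming ICMP packet: verify the checksum, process errors,
 * source quench, redirects and echo requests, then free the buffer.
 */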
int
icmp_rcv(struct sk_buff *skb1, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct ip_protocol *protocol )
{
   int size, offset;
   struct icmp_header *icmph, *icmphr;
   struct sk_buff *skb;
   unsigned char *buff;


   /* drop broadcast packets.  */
   if ((daddr & 0xff000000) == 0 || (daddr & 0xff000000) == 0xff000000)
     {
	skb1->sk = NULL;
	free_skb (skb1, FREE_READ);
	return (0);
     }

   buff = skb1->h.raw;

   icmph = (struct icmp_header *)buff;

   /* Validate the packet first */
   if( icmph->checksum )
     { /* Checksums Enabled? */
	if( ip_compute_csum( (unsigned char *)icmph, len ) )
	  {
	     /* Failed checksum! */
	     PRINTK("\nICMP ECHO failed checksum!");
	     skb1->sk = NULL;
	     free_skb (skb1, FREE_READ);
	     return (0);
	  }
     }

   print_icmph(icmph);

   /* Parse the ICMP message */
   switch( icmph->type )
     {
       case ICMP_DEST_UNREACH:
       case ICMP_SOURCE_QUENCH:
	{
	   struct ip_header *iph;
	   struct ip_protocol *ipprot;
	   unsigned char hash;
	   int err;

	   err = icmph->type << 8 | icmph->code;

	   /* we need to cause the socket to be closed and the error message
	      to be set appropriately. */
	   iph = (struct ip_header *)(icmph+1);

	   /* get the protocol(s) */
	   hash = iph->protocol & (MAX_IP_PROTOS -1 );
	   for (ipprot = ip_protos[hash]; ipprot != NULL; ipprot=ipprot->next)
	     {
		/* pass it off to everyone who wants it. */
		ipprot->err_handler (err, (unsigned char *)iph+4*iph->ihl,
				     iph->daddr, iph->saddr, ipprot);
	     }
	   skb1->sk = NULL;
	   free_skb (skb1, FREE_READ);
	   return (0);
	}

       case ICMP_REDIRECT:
	{
	   /* we need to put a new route in the routing table. */
	   struct rtable *rt; /* we will add a new route. */
	   struct ip_header *iph;

	   iph = (struct ip_header *)(icmph+1);
	   rt = malloc (sizeof (*rt));
	   if (rt != NULL)
	     {
		rt->net = iph->daddr;
		/* assume class C network.  Technically this is incorrect,
		   but will give it a try. */
		if ((icmph->code & 1) == 0) rt->net &= 0x00ffffff;
		rt->dev = dev;
		rt->router = icmph->un.gateway;
		add_route (rt);
	     }
	   skb1->sk = NULL;
	   free_skb (skb1, FREE_READ);
	   return (0);
	}

       case ICMP_ECHO: 
	
	/* Allocate an sk_buff response buffer (assume 64 byte IP header) */

	size = sizeof( struct sk_buff ) + dev->hard_header_len + 64 + len;
	skb = malloc( size );
	if (skb == NULL)
	  {
	     skb1->sk = NULL;
	     free_skb (skb1, FREE_READ);
	     return (0);
	  }
	skb->sk = NULL;
	skb->mem_addr = skb;
	skb->mem_len = size;

	/* Build Layer 2-3 headers for message back to source */
	offset = ip_build_header( skb, daddr, saddr, &dev, IP_ICMP, opt, len );
	if (offset < 0)
	  {
	     /* Problems building header */
	     PRINTK("\nCould not build IP Header for ICMP ECHO Response");
	     free_s (skb->mem_addr, skb->mem_len);
	     skb1->sk = NULL;
	     free_skb (skb1, FREE_READ);
	     return( 0 ); /* just toss the received packet */
	  }

	/* Readjust length according to actual IP header size */
	skb->len = offset + len;

	/* Build ICMP_ECHO Response message */
	icmphr = (struct icmp_header *)( (char *)( skb + 1 ) + offset );
	memcpy( (char *)icmphr, (char *)icmph, len );
	icmphr->type = ICMP_ECHOREPLY;
	icmphr->code = 0;
	icmphr->checksum = 0;

	if( icmph->checksum )
	  { /* Calculate Checksum */
	     icmphr->checksum = ip_compute_csum( (void *)icmphr, len );
	  }

	/* Ship it out - free it when done */
	ip_queue_xmit( (volatile struct sock *)NULL, dev, skb, 1 );
	
	skb1->sk = NULL;
	free_skb (skb1, FREE_READ);
	return( 0 );

       default:
	PRINTK("\nUnsupported ICMP type = x%x", icmph->type );
	skb1->sk = NULL;
	free_skb (skb1, FREE_READ);
	return( 0 ); /* just toss the packet */
     }

   /* should be unnecessary, but just in case. */
   skb1->sk = NULL;
   free_skb (skb1, FREE_READ);
   return( 0 ); /* just toss the packet */
}
Example no. 6
0
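/*
 * Send one datagram to each destination supplied in msg_name: build an
 * mptp header, copy the payload from the iovec, route the packet and
 * hand it to ip_queue_xmit.
 */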
static int
mptp_sendmsg(struct kiocb *iocb, struct socket *sock,
	     struct msghdr *msg, size_t len)
{
	int err;
	uint16_t dport;
	__be32 daddr;
	__be32 saddr;
	uint16_t sport;
	struct sk_buff *skb;
	struct sock *sk;
	struct inet_sock *isk;
	struct mptp_sock *ssk;
	struct mptphdr *shdr;
	int connected = 0;
	int totlen;
	struct rtable *rt = NULL;
	int dests = 0;
	int i;
	struct sockaddr_mptp *mptp_addr = NULL;
	int ret = 0;

	if (unlikely(sock == NULL)) {
		log_error("Sock is NULL\n");
		err = -EINVAL;
		goto out;
	}
	sk = sock->sk;

	if (unlikely(sk == NULL)) {
		log_error("Sock->sk is NULL\n");
		err = -EINVAL;
		goto out;
	}

	isk = inet_sk(sk);
	ssk = mptp_sk(sk);

	sport = ssk->src;
	saddr = isk->inet_saddr;

	if (sport == 0) {
		sport = get_next_free_port();
		if (unlikely(sport == 0)) {
			log_error("No free ports\n");
			err = -ENOMEM;
			goto out;
		}
	}

	if (msg->msg_name) {
		mptp_addr = (struct sockaddr_mptp *)msg->msg_name;

		if (unlikely
		    (msg->msg_namelen <
		     sizeof(*mptp_addr) +
		     mptp_addr->count * sizeof(struct mptp_dest)
		     || mptp_addr->count <= 0)) {
			log_error
			    ("Invalid size for msg_name (size=%u, addr_count=%u)\n",
			     msg->msg_namelen, mptp_addr->count);
			err = -EINVAL;
			goto out;
		}

		dests = mptp_addr->count;
	} else {
		BUG();
		if (unlikely(!ssk->dst || !isk->inet_daddr)) {
			log_error("No destination port/address\n");
			err = -EDESTADDRREQ;
			goto out;
		}
		dport = ssk->dst;
		daddr = isk->inet_daddr;

		log_debug
		    ("Got from socket destination port=%u and address=%u\n",
		     dport, daddr);
		connected = 1;
	}

	if (msg->msg_iovlen < dests)
		dests = msg->msg_iovlen;

	for (i = 0; i < dests; i++) {
		struct mptp_dest *dest = &mptp_addr->dests[i];
		struct iovec *iov = &msg->msg_iov[i];
		char *payload;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
		struct flowi fl = {};
#endif

		dport = ntohs(dest->port);
		if (unlikely(dport == 0 || dport >= MAX_MPTP_PORT)) {
			log_error("Invalid value for destination port(%u)\n",
				  dport);
			err = -EINVAL;
			goto out;
		}

		daddr = dest->addr;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
		fl.u.ip4.saddr = saddr;
		fl.u.ip4.daddr = daddr;
		fl.flowi_proto = sk->sk_protocol;
		fl.flowi_flags = inet_sk_flowi_flags(sk);
#endif

		log_debug
		    ("Received from user space destination port=%u and address=%u\n",
		     dport, daddr);

		len = iov->iov_len;
		totlen = len + sizeof(struct mptphdr) + sizeof(struct iphdr);
		skb =
		    sock_alloc_send_skb(sk, totlen,
					msg->msg_flags & MSG_DONTWAIT, &err);
		if (unlikely(!skb)) {
			log_error("sock_alloc_send_skb failed\n");
			goto out;
		}
		log_debug("Allocated %u bytes for skb (payload size=%u)\n",
			  totlen, len);

		skb_reset_network_header(skb);
		skb_reserve(skb, sizeof(struct iphdr));
		log_debug("Reseted network header\n");
		skb_reset_transport_header(skb);
		skb_put(skb, sizeof(struct mptphdr));
		log_debug("Reseted transport header\n");

		shdr = (struct mptphdr *)skb_transport_header(skb);
		shdr->dst = htons(dport);
		shdr->src = htons(sport);
		shdr->len = htons(len + sizeof(struct mptphdr));

		payload = skb_put(skb, len);
		log_debug("payload=%p\n", payload);

		err =
		    skb_copy_datagram_from_iovec(skb, sizeof(struct mptphdr),
						 iov, 0, len);
		if (unlikely(err)) {
			log_error("skb_copy_datagram_from_iovec failed\n");
			goto out_free;
		}
		log_debug("Copied %u bytes into the skb\n", len);

		if (connected)
			rt = (struct rtable *)__sk_dst_check(sk, 0);

		if (rt == NULL) {
			log_debug("rt == NULL\n");
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
			struct flowi fl = {.fl4_dst = daddr,
				.proto = sk->sk_protocol,
				.flags = inet_sk_flowi_flags(sk),
			};
			err =
			    ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
			if (unlikely(err)) {
				log_error("Route lookup failed\n");
				goto out_free;
			}
#else
			rt = ip_route_output_flow(sock_net(sk), &fl.u.ip4, sk);
			log_debug("rt = %p\n", rt);
			if (IS_ERR(rt)) {
				log_error("Route lookup failed\n");
				goto out_free;
			}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
#else
			sk_dst_set(sk, dst_clone(&rt->dst));
#endif
		}
		log_debug("rt != NULL\n");

		skb->local_df = 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
		err = ip_queue_xmit(skb);
#else
		err = ip_queue_xmit(skb, &fl);
#endif
		if (likely(!err)) {
			log_debug("Sent %u bytes on wire\n", len);
			ret += len;
			dest->bytes = len;
		} else {
			log_error("ip_queue_xmit failed\n");
			dest->bytes = -1;
		}
	}

	return ret;

 out_free:
	kfree_skb(skb);

 out:
	return err;
}
Example no. 7
0
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const int dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			break;
		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;
		dccp_insert_options(sk, skb);
		
		skb->h.raw = skb_push(skb, dccp_header_size);
		dh = dccp_hdr(skb);

		if (!skb->sk)
			skb_set_owner_w(skb, sk);

		/* Build DCCP header and checksum it. */
		memset(dh, 0, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
						      inet->daddr);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = ip_queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		/* NET_XMIT_CN is special. It does not guarantee,
		 * that this packet is lost. It tells that device
		 * is about to start to drop packets or already
		 * drops some packets of the same priority and
		 * invokes us to send less aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
}
Example no. 8
0
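/*
 *	Break a datagram that is larger than the device MTU into fragments,
 *	copying the headers and successive blocks of data into new buffers
 *	and queueing each fragment with ip_queue_xmit.
 */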
void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
	struct iphdr *iph;
	unsigned char *raw;
	unsigned char *ptr;
	struct sk_buff *skb2;
	int left, mtu, hlen, len;
	int offset;
	
	unsigned short true_hard_header_len;

	/*
	 *	Point into the IP datagram header.
	 */

	raw = skb->data;
#if 0
	iph = (struct iphdr *) (raw + dev->hard_header_len);	
	skb->ip_hdr = iph;
#else
	iph = skb->ip_hdr;
#endif

	/*
	 * Calculate the length of the link-layer header appended to
	 * the IP-packet.
	 */
	true_hard_header_len = ((unsigned char *)iph) - raw;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	left = ntohs(iph->tot_len) - hlen;	/* Space per frame */
	hlen += true_hard_header_len;
	mtu = (dev->mtu - hlen);		/* Size of data space */
	ptr = (raw + hlen);			/* Where to start from */

	/*
	 *	Check for any "DF" flag. [DF means do not fragment]
	 */

	if (iph->frag_off & htons(IP_DF))
	{
		ip_statistics.IpFragFails++;
		NETDEBUG(printk("ip_queue_xmit: frag needed\n"));
		return;
	}

	/*
	 *	The protocol doesn't seem to say what to do in the case that the
	 *	frame + options doesn't fit the mtu. As it used to fall down dead
	 *	in this case we were fortunate it didn't happen
	 */

	if(mtu<8)
	{
		/* It's wrong but it's better than nothing */
		icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,htons(dev->mtu), dev);
		ip_statistics.IpFragFails++;
		return;
	}

	/*
	 *	Fragment the datagram.
	 */

	/*
	 *	The initial offset is 0 for a complete frame. When
	 *	fragmenting fragments it's wherever this one starts.
	 */

	if (is_frag & 2)
		offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	else
		offset = 0;


	/*
	 *	Keep copying data until we run out.
	 */

	while(left > 0)
	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
		{
			len/=8;
			len*=8;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len + hlen+15,GFP_ATOMIC)) == NULL)
		{
			NETDEBUG(printk("IP: frag: no memory for new fragment!\n"));
			ip_statistics.IpFragFails++;
			return;
		}

		/*
		 *	Set up data on packet
		 */

		skb2->arp = skb->arp;
		skb2->protocol = htons(ETH_P_IP); /* At least PPP needs this */
#if 0		
		if(skb->free==0)
			printk(KERN_ERR "IP fragmenter: BUG free!=1 in fragmenter\n");
#endif			
		skb2->free = 1;
		skb_put(skb2,len + hlen);
		skb2->h.raw=(char *) skb2->data;
		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (sk)
		{
			atomic_add(skb2->truesize, &sk->wmem_alloc);
			skb2->sk=sk;
		}
		skb2->raddr = skb->raddr;	/* For rebuild_header - must be here */

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->h.raw, raw, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		memcpy(skb2->h.raw + hlen, ptr, len);
		left -= len;

		skb2->h.raw+=true_hard_header_len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
		iph->frag_off = htons((offset >> 3));
		skb2->ip_hdr = iph;

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || (is_frag & 1))
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		ip_statistics.IpFragCreates++;

		ip_queue_xmit(sk, dev, skb2, 2);
	}
	ip_statistics.IpFragOKs++;
}