Example #1
int ip_mc_join_group(struct sock *sk , struct device *dev, unsigned long addr)
{
	int unused= -1;
	int i;
	if(!MULTICAST(addr))
		return -EINVAL;
	if(!(dev->flags&IFF_MULTICAST))
		return -EADDRNOTAVAIL;
	if(sk->ip_mc_list==NULL)
	{
		if((sk->ip_mc_list=(struct ip_mc_socklist *)kmalloc(sizeof(*sk->ip_mc_list), GFP_KERNEL))==NULL)
			return -ENOMEM;
		memset(sk->ip_mc_list,'\0',sizeof(*sk->ip_mc_list));
	}
	for(i=0;i<IP_MAX_MEMBERSHIPS;i++)
	{
		if(sk->ip_mc_list->multiaddr[i]==addr && sk->ip_mc_list->multidev[i]==dev)
			return -EADDRINUSE;
		if(sk->ip_mc_list->multidev[i]==NULL)
			unused=i;
	}

	if(unused==-1)
		return -ENOBUFS;
	sk->ip_mc_list->multiaddr[unused]=addr;
	sk->ip_mc_list->multidev[unused]=dev;
	ip_mc_inc_group(dev,addr);
	return 0;
}
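All of these examples rely on the classful address-test macros from older Linux kernels. For reference, a minimal sketch of how include/linux/in.h defined them at the time (modern kernels use ipv4_is_multicast() and friends instead):

/* Classful address tests on network-byte-order addresses (sketch). */
#define MULTICAST(x)	(((x) & htonl(0xf0000000)) == htonl(0xe0000000))	/* 224.0.0.0/4  */
#define BADCLASS(x)	(((x) & htonl(0xf0000000)) == htonl(0xf0000000))	/* 240.0.0.0/4  */
#define ZERONET(x)	(((x) & htonl(0xff000000)) == htonl(0x00000000))	/* 0.0.0.0/8    */
#define LOOPBACK(x)	(((x) & htonl(0xff000000)) == htonl(0x7f000000))	/* 127.0.0.0/8  */
#define LOCAL_MCAST(x)	(((x) & htonl(0xffffff00)) == htonl(0xe0000000))	/* 224.0.0.0/24 */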
Example #2
unsigned inet_addr_type(u32 addr)
{
	struct flowi		fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
	struct fib_result	res;
	unsigned ret = RTN_BROADCAST;

	if (ZERONET(addr) || BADCLASS(addr))
		return RTN_BROADCAST;
	if (MULTICAST(addr))
		return RTN_MULTICAST;

#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif
	
	if (ip_fib_local_table) {
		ret = RTN_UNICAST;
		if (!ip_fib_local_table->tb_lookup(ip_fib_local_table,
						   &fl, &res)) {
			ret = res.type;
			fib_res_put(&res);
		}
	}
	return ret;
}
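A hypothetical caller sketch (the helper name is invented here, not taken from the example): this is the typical pattern for using the returned RTN_* value to reject addresses a socket may not bind to.

/* Hypothetical caller: allow only unicast or local addresses. */
static int check_bind_addr(u32 addr)
{
	unsigned type = inet_addr_type(addr);

	if (type == RTN_MULTICAST || type == RTN_BROADCAST)
		return -EADDRNOTAVAIL;
	return 0;
}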
Example #3
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
    int err;
    u32 addr = imr->imr_multiaddr.s_addr;
    struct ip_mc_socklist *iml, *i;
    struct in_device *in_dev;
    int count = 0;

    if (!MULTICAST(addr))
        return -EINVAL;

    rtnl_shlock();

    if (!imr->imr_ifindex)
        in_dev = ip_mc_find_dev(imr);
    else {
        in_dev = inetdev_by_index(imr->imr_ifindex);
        if (in_dev)
            __in_dev_put(in_dev);
    }

    if (!in_dev) {
        iml = NULL;
        err = -ENODEV;
        goto done;
    }

    iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);

    err = -EADDRINUSE;
    for (i=sk->protinfo.af_inet.mc_list; i; i=i->next) {
        if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
            /* New style additions are reference counted */
            if (imr->imr_address.s_addr == 0) {
                i->count++;
                err = 0;
            }
            goto done;
        }
        count++;
    }
    err = -ENOBUFS;
    if (iml == NULL || count >= sysctl_igmp_max_memberships)
        goto done;
    memcpy(&iml->multi, imr, sizeof(*imr));
    iml->next = sk->protinfo.af_inet.mc_list;
    iml->count = 1;
    sk->protinfo.af_inet.mc_list = iml;
    ip_mc_inc_group(in_dev, addr);
    iml = NULL;
    err = 0;

done:
    rtnl_shunlock();
    if (iml)
        sock_kfree_s(sk, iml, sizeof(*iml));
    return err;
}
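This kernel path is reached from user space via setsockopt(). A minimal sketch of the corresponding application call, assuming glibc exposes struct ip_mreqn through <netinet/in.h> (the group address and field choices are illustrative):

/* User-space sketch: join 239.1.2.3, letting the kernel pick the interface. */
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int join_group(int fd)
{
	struct ip_mreqn mreq = { 0 };

	mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3");
	mreq.imr_address.s_addr   = htonl(INADDR_ANY);
	mreq.imr_ifindex          = 0;	/* 0 = let routing choose, as handled above */

	return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
}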
Example #4
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
	struct rtable *rt;
	u32 saddr;
	int oif;
	int err;

	
	if (addr_len < sizeof(*usin)) 
	  	return -EINVAL;

	if (usin->sin_family != AF_INET) 
	  	return -EAFNOSUPPORT;

	sk_dst_reset(sk);

	oif = sk->sk_bound_dev_if;
	saddr = inet->saddr;
	if (MULTICAST(usin->sin_addr.s_addr)) {
		if (!oif)
			oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	}
	err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
			       RT_CONN_FLAGS(sk), oif,
			       sk->sk_protocol,
			       inet->sport, usin->sin_port, sk, 1);
	if (err) {
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
		ip_rt_put(rt);
		return -EACCES;
	}
  	if (!inet->saddr)
	  	inet->saddr = rt->rt_src;	/* Update source address */
	if (!inet->rcv_saddr)
		inet->rcv_saddr = rt->rt_src;
	inet->daddr = rt->rt_dst;
	inet->dport = usin->sin_port;
	sk->sk_state = TCP_ESTABLISHED;
	inet->id = jiffies;

	sk_dst_set(sk, &rt->u.dst);
	return(0);
}
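The multicast branch above is what an application exercises when it connect()s a UDP socket to a group address so that plain send() can be used afterwards. A hedged user-space sketch:

/* User-space sketch: "connect" a UDP socket to a multicast group. */
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <string.h>

static int connect_to_group(int fd, const char *group, unsigned short port)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family      = AF_INET;
	sin.sin_port        = htons(port);
	sin.sin_addr.s_addr = inet_addr(group);	/* e.g. "239.1.2.3" */

	return connect(fd, (struct sockaddr *)&sin, sizeof(sin));
}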
Example #5
struct rtable * ip_rt_slow_route (__u32 daddr, int local, struct device *dev)
{
	unsigned hash = ip_rt_hash_code(daddr)^local;
	struct rtable * rth;
	struct fib_node * f;
	struct fib_info * fi;
	__u32 saddr;

#if RT_CACHE_DEBUG >= 2
	printk("rt_cache miss @%08x\n", daddr);
#endif

	rth = kmalloc(sizeof(struct rtable), GFP_ATOMIC);
	if (!rth)
	{
		ip_rt_unlock();
		return NULL;
	}

	if (local)
		f = fib_lookup_local(daddr, dev);
	else
		f = fib_lookup (daddr, dev);

	if (f)
	{
		fi = f->fib_info;
		f->fib_use++;
	}

	if (!f || (fi->fib_flags & RTF_REJECT))
	{
#ifdef CONFIG_KERNELD	
		char wanted_route[20];
#endif		
#if RT_CACHE_DEBUG >= 2
		printk("rt_route failed @%08x\n", daddr);
#endif
		ip_rt_unlock();
		kfree_s(rth, sizeof(struct rtable));
#ifdef CONFIG_KERNELD		
		if (MULTICAST(daddr)) 
			return NULL; 
		daddr=ntohl(daddr);
		sprintf(wanted_route, "%d.%d.%d.%d",
			(int)(daddr >> 24) & 0xff, (int)(daddr >> 16) & 0xff,
			(int)(daddr >> 8) & 0xff, (int)daddr & 0xff);
		kerneld_route(wanted_route); 	/* Dynamic route request */
#endif		
		return NULL;
	}
Example #6
File: icmp.c Project: 0xcc/tapip
/* NOTE: icmp dont drop @ipkb */
void icmp_send(unsigned char type, unsigned char code,
		unsigned int data, struct pkbuf *pkb_in)
{
	struct pkbuf *pkb;
	struct ip *iphdr = pkb2ip(pkb_in);
	struct icmp *icmphdr;
	int paylen = _ntohs(iphdr->ip_len);	/* icmp payload length */
	if (paylen < iphlen(iphdr) + 8)
		return;
	/*
	 * RFC 1812 Section 4.3.2.7 for sanity check
	 * An ICMP error message MUST NOT be sent as the result of receiving:
	 * 1. A packet sent as a Link Layer broadcast or multicast
	 * 2. A packet destined to an IP broadcast or IP multicast address
	 *[3] A packet whose source address has a network prefix of zero or is an
	 *      invalid source address (as defined in Section [5.3.7])
	 * 4. Any fragment of a datagram other then the first fragment (i.e., a
	 *      packet for which the fragment offset in the IP header is nonzero).
	 * 5. An ICMP error message
	 */
	if (pkb_in->pk_type != PKT_LOCALHOST)
		return;
	if (MULTICAST(iphdr->ip_dst) || BROADCAST(iphdr->ip_dst))
		return;
	if (iphdr->ip_fragoff & _htons(IP_FRAG_OFF))
		return;

	if (icmp_type_error(type) && iphdr->ip_pro == IP_P_ICMP) {
		icmphdr = ip2icmp(iphdr);
		if (icmphdr->icmp_type > ICMP_T_MAXNUM || icmp_error(icmphdr))
			return;
	}
	/* build icmp packet and send */
	/* ip packet size must be smaller than 576 bytes */
	if (IP_HRD_SZ + ICMP_HRD_SZ + paylen > 576)
		paylen = 576 - IP_HRD_SZ - ICMP_HRD_SZ;
	pkb = alloc_pkb(ETH_HRD_SZ + IP_HRD_SZ + ICMP_HRD_SZ + paylen);
	icmphdr = (struct icmp *)(pkb2ip(pkb)->ip_data);
	icmphdr->icmp_type = type;
	icmphdr->icmp_code = code;
	icmphdr->icmp_cksum = 0;
	icmphdr->icmp_undata = data;
	memcpy(icmphdr->icmp_data, (unsigned char *)iphdr, paylen);
	icmphdr->icmp_cksum =
		icmp_chksum((unsigned short *)icmphdr, ICMP_HRD_SZ + paylen);
	icmpdbg("to "IPFMT"(payload %d) [type %d code %d]\n",
		ipfmt(iphdr->ip_src), paylen, type, code);
	ip_send_info(pkb, 0, IP_HRD_SZ + ICMP_HRD_SZ + paylen,
						0, IP_P_ICMP, iphdr->ip_src);
}
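icmp_chksum() is presumably the standard Internet checksum (RFC 1071); the real tapip helper may differ in detail. A self-contained sketch of that algorithm for reference:

/* RFC 1071 ones-complement checksum over a byte buffer. */
static unsigned short inet_checksum(const unsigned short *data, int len)
{
	unsigned long sum = 0;

	while (len > 1) {			/* sum 16-bit words */
		sum += *data++;
		len -= 2;
	}
	if (len == 1)				/* pad a trailing odd byte */
		sum += *(const unsigned char *)data;
	sum  = (sum >> 16) + (sum & 0xffff);	/* fold carries */
	sum += (sum >> 16);
	return (unsigned short)~sum;
}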
Example #7
void ip_rcv(void)
{
  uint16_t len = pkt_left;
  SNMP(ip_in_receives);
  if (!pkt_pull(&iph, sizeof(struct iphdr)))
    return;
  plen = ntohs(iph.tot_len);
  /* FIXME: for speed fold ihl/version and be smarter */
  if (iph.ihl < 5 || iph.version != 4 || len < plen) {
    SNMP(ip_in_hdr_errors);
    return;
  }
  plen -= sizeof(struct iphdr);
  if (pkt_len > plen)
    pkt_len = plen;

  /* FIXME: checksum */
  if (iph.ihl != 5)
    if (ip_options())
      return;
  /* No frags for now (memory limits on 8bit) */
  if (iph.frag_off)
    return;
  if (iph.daddr == 0xFFFFFFFF)
    pkt_type = PKT_BROADCAST;
  else if (MULTICAST(iph.daddr))
    pkt_type = PKT_MULTICAST;
  else if (iph.daddr == ip_addr || LOOPBACK(iph.daddr))
    pkt_type = PKT_HOST;
  else
    /* No forwarding so we don't have to worry about martians either */
    return;

  /* FIXME: raw sockets ?? */
  if (iph.protocol == IPPROTO_TCP)
    tcp_rcv();
  else if (iph.protocol == IPPROTO_UDP)
    udp_rcv();
  else if (iph.protocol == IPPROTO_ICMP)
    icmp_rcv();
  else
    icmp_send_unreach(ICMP_DEST_UNREACH, ICMP_PROT_UNREACH);
}
Example #8
static void blog_skip_ppp_multicast(struct sk_buff *skb, const unsigned char *dest)
{
	unsigned short usProtocol;
	unsigned short usProtocol1;
	__u32 *usMulticastIp;

    if(!is_multicast_ether_addr(dest) )
    {
        usProtocol = ((u16 *) skb->data)[-1];
        usProtocol1 = skb->data[PPPOE_UDPPROTOID_OFFSET];
        usMulticastIp = (__u32*)(skb->data + PPPOE_UDPADDR_OFFSET);
        if((usProtocol == __constant_htons(ETH_P_PPP_SES))  
            && (usProtocol1 == IPPROTO_UDP))
        {
            if(MULTICAST((*usMulticastIp)))
            {
                blog_skip(skb);
            }
        }
    }
}
Example #9
int ip_mc_leave_group(struct sock *sk, struct device *dev, unsigned long addr)
{
	int i;
	if(!MULTICAST(addr))
		return -EINVAL;
	if(!(dev->flags&IFF_MULTICAST))
		return -EADDRNOTAVAIL;
	if(sk->ip_mc_list==NULL)
		return -EADDRNOTAVAIL;

	for(i=0;i<IP_MAX_MEMBERSHIPS;i++)
	{
		if(sk->ip_mc_list->multiaddr[i]==addr && sk->ip_mc_list->multidev[i]==dev)
		{
			sk->ip_mc_list->multidev[i]=NULL;
			ip_mc_dec_group(dev,addr);
			return 0;
		}
	}
	return -EADDRNOTAVAIL;
}
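The matching user-space call, sketched with the classic struct ip_mreq that this older kernel interface expects (the group address is illustrative):

/* User-space sketch: leave a previously joined group. */
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int leave_group(int fd)
{
	struct ip_mreq mreq;

	mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3");
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);

	return setsockopt(fd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
}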
Example #10
static int match(const struct sk_buff *skb,
      const struct net_device *in,
      const struct net_device *out,
      const struct xt_match *match,
      const void *matchinfo,
      int offset,
      unsigned int protoff,
      int *hotdrop)
{
	u_int8_t type;
	const struct xt_pkttype_info *info = matchinfo;

	if (skb->pkt_type == PACKET_LOOPBACK)
		type = (MULTICAST(skb->nh.iph->daddr)
			? PACKET_MULTICAST
			: PACKET_BROADCAST);
	else
		type = skb->pkt_type;

	return (type == info->pkttype) ^ info->invert;
}
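This is the kernel side of the iptables pkttype match: a rule such as iptables -A INPUT -m pkttype --pkt-type multicast -j ACCEPT (chain and target purely illustrative) ends up in this callback, with looped-back packets re-classified by testing the IPv4 destination address as shown.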
Example #11
/** Open the varp multicast socket.
 *
 * @param mcaddr multicast address 
 * @param port port
 * @param val return parameter for the socket
 * @return 0 on success, error code otherwise
 */
int varp_mcast_open(uint32_t mcaddr, uint16_t port, int *val){
    int err = 0;
    int flags = VSOCK_REUSE;
    int sock = 0;
    
    dprintf(">\n");
    flags |= VSOCK_MULTICAST;
    flags |= VSOCK_BROADCAST;
    
    err = create_socket(SOCK_DGRAM, mcaddr, port, flags, &sock);
    if(err < 0) goto exit;
    if(MULTICAST(mcaddr)){
        err = setsock_multicast_ttl(sock, 1);
        if(err < 0) goto exit;
    }
  exit:
    if(err){
        shutdown(sock, 2);
    }
    *val = (err ? -1 : sock);
    dprintf("< err=%d val=%d\n", err, *val);
    return err;
}
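setsock_multicast_ttl() is presumably a thin wrapper around the standard IP_MULTICAST_TTL socket option; a hedged sketch of what such a helper typically looks like (the real vnet helper may differ):

/* Sketch: keep multicast transmissions on the local subnet (TTL 1). */
#include <netinet/in.h>
#include <sys/socket.h>

static int set_multicast_ttl(int sock, unsigned char ttl)
{
	return setsockopt(sock, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
}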
Example #12
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	u32 daddr;
	u32 saddr;
	u8  tos;
	int err;

	err = -EMSGSIZE;
	if (len < 0 || len > 0xFFFF)
		goto out;

	/*
	 *	Check the flags.
	 */

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message */
		goto out;               /* compatibility */
			 
	/*
	 *	Get and verify the address. 
	 */

	if (msg->msg_namelen) {
		struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(*usin))
			goto out;
		if (usin->sin_family != AF_INET) {
			static int complained;
			if (!complained++)
				printk(KERN_INFO "%s forgot to set AF_INET in "
						 "raw sendmsg. Fix it!\n",
						 current->comm);
			err = -EAFNOSUPPORT;
			if (usin->sin_family)
				goto out;
		}
		daddr = usin->sin_addr.s_addr;
		/* ANK: I did not forget to get protocol from port field.
		 * I just do not know, who uses this weirdness.
		 * IP_HDRINCL is much more convenient.
		 */
	} else {
		err = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED) 
			goto out;
		daddr = inet->daddr;
	}

	ipc.addr = inet->saddr;
	ipc.opt = NULL;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			goto out;
		if (ipc.opt)
			free = 1;
	}

	saddr = ipc.addr;
	ipc.addr = daddr;

	if (!ipc.opt)
		ipc.opt = inet->opt;

	if (ipc.opt) {
		err = -EINVAL;
		/* Linux does not mangle headers on raw sockets,
		 * so that IP options + IP_HDRINCL is non-sense.
		 */
		if (inet->hdrincl)
			goto done;
		if (ipc.opt->srr) {
			if (!daddr)
				goto done;
			daddr = ipc.opt->faddr;
		}
	}
	tos = RT_CONN_FLAGS(sk);
	if (msg->msg_flags & MSG_DONTROUTE)
		tos |= RTO_ONLINK;

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	}

	{
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = inet->hdrincl ? IPPROTO_RAW :
					    		     sk->sk_protocol,
				  };
		if (!inet->hdrincl)
			raw_probe_proto_opt(&fl, msg);

		err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
	}
	if (err)
		goto done;

	err = -EACCES;
	if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
		goto done;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	if (inet->hdrincl)
		err = raw_send_hdrinc(sk, msg->msg_iov, len, 
					rt, msg->msg_flags);
	
	 else {
		if (!ipc.addr)
			ipc.addr = rt->rt_dst;
		lock_sock(sk);
		err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
					&ipc, rt, msg->msg_flags);
		if (err)
			ip_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = ip_push_pending_frames(sk);
		release_sock(sk);
	}
done:
	if (free)
		kfree(ipc.opt);
	ip_rt_put(rt);

out:	return err < 0 ? err : len;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
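The MULTICAST(daddr) branch above is what lets an unconnected raw socket fall back to its IP_MULTICAST_IF defaults. A user-space sketch of a send that lands in this path, for an fd created with socket(AF_INET, SOCK_RAW, IPPROTO_ICMP) (addresses and payload illustrative; needs CAP_NET_RAW, and the ICMP checksum is left out for brevity):

/* User-space sketch: send a raw ICMP packet to a multicast group;
 * the kernel builds the IP header since IP_HDRINCL is not set. */
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <string.h>

static int send_raw_to_group(int fd)
{
	struct icmphdr icmp;
	struct sockaddr_in dst;

	memset(&icmp, 0, sizeof(icmp));
	icmp.type = ICMP_ECHO;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family      = AF_INET;
	dst.sin_addr.s_addr = inet_addr("224.0.0.1");

	return sendto(fd, &icmp, sizeof(icmp), 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}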
Example #13
/*
 * This routine builds the appropriate hardware/IP headers for
 * the routine.  It assumes that if *dev != NULL then the
 * protocol knows what it's doing, otherwise it uses the
 * routing/ARP tables to select a device struct.
 */
int ip_build_header(struct sk_buff *skb, __u32 saddr, __u32 daddr,
		struct device **dev, int type, struct options *opt,
		int len, int tos, int ttl, struct rtable ** rp)
{
	struct rtable *rt;
	__u32 raddr;
	int tmp;
	struct iphdr *iph;
	__u32 final_daddr = daddr;


	if (opt && opt->srr)
		daddr = opt->faddr;

	/*
	 *	See if we need to look up the device.
	 */

#ifdef CONFIG_IP_MULTICAST	
	if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
		*dev=dev_get(skb->sk->ip_mc_name);
#endif
	if (rp)
	{
		rt = ip_check_route(rp, daddr, skb->localroute, *dev);
		/*
		 * If rp != NULL rt_put following below should not
		 * release route, so that...
		 */
		if (rt)
			atomic_inc(&rt->rt_refcnt);
	}
	else
		rt = ip_rt_route(daddr, skb->localroute, *dev);


	if (*dev == NULL)
	{
		if (rt == NULL)
		{
			ip_statistics.IpOutNoRoutes++;
			return(-ENETUNREACH);
		}

		*dev = rt->rt_dev;
	}

	if ((LOOPBACK(saddr) && !LOOPBACK(daddr)) || !saddr)
		saddr = rt ? rt->rt_src : (*dev)->pa_addr;

	raddr = rt ? rt->rt_gateway : daddr;

	if (opt && opt->is_strictroute && rt && (rt->rt_flags & RTF_GATEWAY))
	{
		ip_rt_put(rt);
		ip_statistics.IpOutNoRoutes++;
		return -ENETUNREACH;
	}

	/*
	 *	Now build the MAC header.
	 */

	if (type==IPPROTO_TCP)
		tmp = ip_send_room(rt, skb, raddr, len, *dev, saddr);
	else
		tmp = ip_send(rt, skb, raddr, len, *dev, saddr);

	ip_rt_put(rt);

	/*
	 *	Book keeping
	 */

	skb->dev = *dev;
	skb->saddr = saddr;
	
	/*
	 *	Now build the IP header.
	 */

	/*
	 *	If we are using IPPROTO_RAW, then we don't need an IP header, since
	 *	one is being supplied to us by the user
	 */

	if(type == IPPROTO_RAW)
		return (tmp);

	/*
	 *	Build the IP addresses
	 */
	 
	if (opt)
		iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr) + opt->optlen);
	else
		iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = tos;
	iph->frag_off = 0;
	iph->ttl      = ttl;
	iph->daddr    = daddr;
	iph->saddr    = saddr;
	iph->protocol = type;
	skb->ip_hdr   = iph;

	if (!opt || !opt->optlen)
		return sizeof(struct iphdr) + tmp;
	iph->ihl += opt->optlen>>2;
	ip_options_build(skb, opt, final_daddr, (*dev)->pa_addr, 0);
	return iph->ihl*4 + tmp;
}
Example #14
static int arp_process(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct in_device *in_dev = in_dev_get(dev);
	struct arphdr *arp;
	unsigned char *arp_ptr;
	struct rtable *rt;
	unsigned char *sha, *tha;
	u32 sip, tip;
	u16 dev_type = dev->type;
	int addr_type;
	struct neighbour *n;

	/* arp_rcv below verifies the ARP header and verifies the device
	 * is ARP'able.
	 */

	if (in_dev == NULL)
		goto out;

	arp = skb->nh.arph;

	switch (dev_type) {
	default:	
		if (arp->ar_pro != htons(ETH_P_IP) ||
		    htons(dev_type) != arp->ar_hrd)
			goto out;
		break;
#ifdef CONFIG_NET_ETHERNET
	case ARPHRD_ETHER:
#endif
#ifdef CONFIG_TR
	case ARPHRD_IEEE802_TR:
#endif
#ifdef CONFIG_FDDI
	case ARPHRD_FDDI:
#endif
#ifdef CONFIG_NET_FC
	case ARPHRD_IEEE802:
#endif
#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \
    defined(CONFIG_FDDI)	 || defined(CONFIG_NET_FC)
		/*
		 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
		 * devices, according to RFC 2625) devices will accept ARP
		 * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
		 * This is the case also of FDDI, where the RFC 1390 says that
		 * FDDI devices should accept ARP hardware of (1) Ethernet,
		 * however, to be more robust, we'll accept both 1 (Ethernet)
		 * or 6 (IEEE 802.2)
		 */
		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP))
			goto out;
		break;
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
	case ARPHRD_AX25:
		if (arp->ar_pro != htons(AX25_P_IP) ||
		    arp->ar_hrd != htons(ARPHRD_AX25))
			goto out;
		break;
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
	case ARPHRD_NETROM:
		if (arp->ar_pro != htons(AX25_P_IP) ||
		    arp->ar_hrd != htons(ARPHRD_NETROM))
			goto out;
		break;
#endif
#endif
	}

	/* Understand only these message types */

	if (arp->ar_op != htons(ARPOP_REPLY) &&
	    arp->ar_op != htons(ARPOP_REQUEST))
		goto out;

/*
 *	Extract fields
 */
	arp_ptr= (unsigned char *)(arp+1);
	sha	= arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha	= arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);
/* 
 *	Check for bad requests for 127.x.x.x and requests for multicast
 *	addresses.  If this is one such, delete it.
 */
	if (LOOPBACK(tip) || MULTICAST(tip))
		goto out;

/*
 *     Special case: We must set Frame Relay source Q.922 address
 */
	if (dev_type == ARPHRD_DLCI)
		sha = dev->broadcast;

/*
 *  Process entry.  The idea here is we want to send a reply if it is a
 *  request for us or if it is a request for someone else that we hold
 *  a proxy for.  We want to add an entry to our cache if it is a reply
 *  to us or if it is a request for our address.  
 *  (The assumption for this last is that if someone is requesting our 
 *  address, they are probably intending to talk to us, so it saves time 
 *  if we cache their address.  Their address is also probably not in 
 *  our cache, since ours is not in their cache.)
 * 
 *  Putting this another way, we only care about replies if they are to
 *  us, in which case we add them to the cache.  For requests, we care
 *  about those for us and those for our proxies.  We reply to both,
 *  and in the case of requests for us we add the requester to the arp 
 *  cache.
 */

	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
	if (sip == 0) {
		if (arp->ar_op == htons(ARPOP_REQUEST) &&
		    inet_addr_type(tip) == RTN_LOCAL &&
		    !arp_ignore(in_dev,dev,sip,tip))
			arp_send(ARPOP_REPLY,ETH_P_ARP,tip,dev,tip,sha,dev->dev_addr,dev->dev_addr);
		goto out;
	}

	if (arp->ar_op == htons(ARPOP_REQUEST) &&
	    ip_route_input(skb, tip, sip, 0, dev) == 0) {

		rt = (struct rtable*)skb->dst;
		addr_type = rt->rt_type;

		if (addr_type == RTN_LOCAL) {
			n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
			if (n) {
				int dont_send = 0;

				if (!dont_send)
					dont_send |= arp_ignore(in_dev,dev,sip,tip);
				if (!dont_send && IN_DEV_ARPFILTER(in_dev))
					dont_send |= arp_filter(sip,tip,dev); 
				if (!dont_send)
					arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);

				neigh_release(n);
			}
			goto out;
		} else if (IN_DEV_FORWARD(in_dev)) {
			if ((rt->rt_flags&RTCF_DNAT) ||
			    (addr_type == RTN_UNICAST  && rt->u.dst.dev != dev &&
			     (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, &tip, dev, 0)))) {
				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
				if (n)
					neigh_release(n);

				if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || 
				    skb->pkt_type == PACKET_HOST ||
				    in_dev->arp_parms->proxy_delay == 0) {
					arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
				} else {
					pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
					in_dev_put(in_dev);
					return 0;
				}
				goto out;
			}
		}
	}

	/* Update our ARP tables */

	n = __neigh_lookup(&arp_tbl, &sip, dev, 0);

#ifdef CONFIG_IP_ACCEPT_UNSOLICITED_ARP
	/* Unsolicited ARP is not accepted by default.
	   It is possible, that this option should be enabled for some
	   devices (strip is candidate)
	 */
	if (n == NULL &&
	    arp->ar_op == htons(ARPOP_REPLY) &&
	    inet_addr_type(sip) == RTN_UNICAST)
		n = __neigh_lookup(&arp_tbl, &sip, dev, -1);
#endif

	if (n) {
		int state = NUD_REACHABLE;
		int override;

		/* If several different ARP replies follows back-to-back,
		   use the FIRST one. It is possible, if several proxy
		   agents are active. Taking the first reply prevents
		   arp trashing and chooses the fastest router.
		 */
		override = time_after(jiffies, n->updated + n->parms->locktime);

		/* Broadcast replies and request packets
		   do not assert neighbour reachability.
		 */
		if (arp->ar_op != htons(ARPOP_REPLY) ||
		    skb->pkt_type != PACKET_HOST)
			state = NUD_STALE;
		neigh_update(n, sha, state, override ? NEIGH_UPDATE_F_OVERRIDE : 0);
		neigh_release(n);
	}
Example #15
void ip_queue_xmit(struct sock *sk, struct device *dev,
	      struct sk_buff *skb, int free)
{
	unsigned int tot_len;
	struct iphdr *iph;

	IS_SKB(skb);

	/*
	 *	Do some book-keeping in the packet for later
	 */

	skb->sk = sk;
	skb->dev = dev;
	skb->when = jiffies;

	/*
	 *	Find the IP header and set the length. This is bad
	 *	but once we get the skb data handling code in the
	 *	hardware will push its header sensibly and we will
	 *	set skb->ip_hdr to avoid this mess and the fixed
	 *	header length problem
	 */

	iph = skb->ip_hdr;
	tot_len = skb->len - (((unsigned char *)iph) - skb->data);
	iph->tot_len = htons(tot_len);

	switch (free) {
		/* No reassigning numbers to fragments... */
		default:
			free = 1;
			break;
		case 0:
			add_to_send_queue(sk, skb);
			/* fall through */
		case 1:
			iph->id = htons(ip_id_count++);
	}

	skb->free = free;

	/* Sanity check */
	if (dev == NULL)
		goto no_device;

#ifdef CONFIG_FIREWALL
	if (call_out_firewall(PF_INET, skb->dev, iph, NULL) < FW_ACCEPT)
		goto out;
#endif	

	/*
	 *	Do we need to fragment. Again this is inefficient.
	 *	We need to somehow lock the original buffer and use
	 *	bits of it.
	 */

	if (tot_len > dev->mtu)
		goto fragment;

	/*
	 *	Add an IP checksum
	 */

	ip_send_check(iph);

	/*
	 *	More debugging. You cannot queue a packet already on a list
	 *	Spot this and moan loudly.
	 */
	if (skb->next != NULL)
	{
		NETDEBUG(printk("ip_queue_xmit: next != NULL\n"));
		skb_unlink(skb);
	}

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	 
	ip_statistics.IpOutRequests++;
#ifdef CONFIG_IP_ACCT
	ip_fw_chk(iph,dev,NULL,ip_acct_chain,0,IP_FW_MODE_ACCT_OUT);
#endif	

#ifdef CONFIG_IP_MULTICAST	

	/*
	 *	Multicasts are looped back for other local users
	 */
	 
	if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
	{
		if(sk==NULL || sk->ip_mc_loop)
		{
			if(iph->daddr==IGMP_ALL_HOSTS || (dev->flags&IFF_ALLMULTI))
			{
				ip_loopback(dev,skb);
			}
			else
			{
				struct ip_mc_list *imc=dev->ip_mc_list;
				while(imc!=NULL)
				{
					if(imc->multiaddr==iph->daddr)
					{
						ip_loopback(dev,skb);
						break;
					}
					imc=imc->next;
				}
			}
		}
		/* Multicasts with ttl 0 must not go beyond the host */
		
		if (iph->ttl==0)
			goto out;
	}
#endif
	if ((dev->flags & IFF_BROADCAST) && !(dev->flags & IFF_LOOPBACK)
	    && (iph->daddr==dev->pa_brdaddr || iph->daddr==0xFFFFFFFF))
		ip_loopback(dev,skb);
		
	if (dev->flags & IFF_UP)
	{
		/*
		 *	If we have an owner use its priority setting,
		 *	otherwise use NORMAL
		 */
		int priority = SOPRI_NORMAL;
		if (sk)
			priority = sk->priority;

		dev_queue_xmit(skb, dev, priority);
		return;
	}
	if(sk)
		sk->err = ENETDOWN;
	ip_statistics.IpOutDiscards++;
out:
	if (free)
		kfree_skb(skb, FREE_WRITE);
	return;

no_device:
	NETDEBUG(printk("IP: ip_queue_xmit dev = NULL\n"));
	goto out;

fragment:
	ip_fragment(sk,skb,dev,0);
	goto out;
}
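The loopback-to-local-receivers logic above is controlled per socket by sk->ip_mc_loop, i.e. the IP_MULTICAST_LOOP option. A minimal user-space sketch:

/* Sketch: stop the kernel from looping our own multicast back to us. */
#include <netinet/in.h>
#include <sys/socket.h>

static int disable_mc_loopback(int fd)
{
	unsigned char loop = 0;

	return setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop));
}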
Example #16
/***
 *	arp_rcv:	Receive an arp request by the device layer.
 */
int rt_arp_rcv(struct rtskb *skb, struct rtnet_device *rtdev, struct rtpacket_type *pt)
{
	struct net_device *dev = dev_get_by_rtdev(rtdev);
	struct arphdr *arp = skb->nh.arph;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	unsigned char *sha, *tha;
	u32 sip, tip;
	u16 dev_type = dev->type;

/*
 *	The hardware length of the packet should match the hardware length
 *	of the device.  Similarly, the hardware types should match.  The
 *	device should be ARP-able.  Also, if pln is not 4, then the lookup
 *	is not from an IP number.  We can't currently handle this, so toss
 *	it. 
 */  
	if (arp->ar_hln != dev->addr_len    || 
	    dev->flags & IFF_NOARP ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_pln != 4)
		goto out;

	switch (dev_type) {
	default:	
		if ( arp->ar_pro != __constant_htons(ETH_P_IP) &&
		     htons(dev_type) != arp->ar_hrd )
			goto out;
		break;
	case ARPHRD_ETHER:
		/*
		 * ETHERNET devices will accept ARP hardware types of either
		 * 1 (Ethernet) or 6 (IEEE 802.2).
		 */
		if (arp->ar_hrd != __constant_htons(ARPHRD_ETHER) &&
		    arp->ar_hrd != __constant_htons(ARPHRD_IEEE802)) {
			goto out;
		}
		if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
			goto out;
		}
		break;
	}

	/* Understand only these message types */
	if (arp->ar_op != __constant_htons(ARPOP_REPLY) &&
	    arp->ar_op != __constant_htons(ARPOP_REQUEST))
		goto out;

/*
 *	Extract fields
 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);

	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

/* 
 *	Check for bad requests for 127.x.x.x and requests for multicast
 *	addresses.  If this is one such, delete it.
 */
	if (LOOPBACK(tip) || MULTICAST(tip))
		goto out;


	if (dev_type == ARPHRD_DLCI)
		sha = dev->broadcast;

/*
 *  Process entry.  The idea here is we want to send a reply if it is a
 *  request for us or if it is a request for someone else that we hold
 *  a proxy for.  We want to add an entry to our cache if it is a reply
 *  to us or if it is a request for our address.  
 *  (The assumption for this last is that if someone is requesting our 
 *  address, they are probably intending to talk to us, so it saves time 
 *  if we cache their address.  Their address is also probably not in 
 *  our cache, since ours is not in their cache.)
 * 
 *  Putting this another way, we only care about replies if they are to
 *  us, in which case we add them to the cache.  For requests, we care
 *  about those for us and those for our proxies.  We reply to both,
 *  and in the case of requests for us we add the requester to the arp 
 *  cache.
 */

	if ( rt_ip_route_input(skb, tip, sip, rtdev)==0 ) {
		rt_arp_table_add(sip, sha);
		if ( arp->ar_op==__constant_htons(ARPOP_REQUEST) )
			rt_arp_send(ARPOP_REPLY,ETH_P_ARP,sip,rtdev,tip,sha,dev->dev_addr,sha);
	}

out:
	kfree_rtskb(skb);
	return 0;
}
Example #17
int ip_rcv_finish(struct sk_buff *skb)
{
        struct iphdr *iph = skb->nh.iph;
        /*
         *  Initialise the virtual path cache for the packet. It describes
         *  how the packet travels inside Linux networking.
         */
		
        if (skb->dst == NULL) {
            int err = 1;
            
            if(g_session_forward_enable)
            {
				if (skb->nfct && IP_CT_DIR_REPLY == CTINFO2DIR(skb->nfctinfo)
	                && !(((struct ff_cache_info_ex*)skb->nfct)->ff_info[IP_CT_DIR_ORIGINAL].flag & DRV_FF_FLAG_LINUX)
	                && !is_vpn_pkt(skb)){
	                sessfw_debug(1, "--- don't lookup route ---\n" TUPLE_INFO(
	                   &((struct ff_cache_info_ex*)skb->nfct)->ct.tuplehash[IP_CT_DIR_ORIGINAL].tuple,
	                   &((struct ff_cache_info_ex*)skb->nfct)->ct.tuplehash[IP_CT_DIR_REPLY].tuple));
	                err = 0;
	                goto route_label;
				}
				#ifdef CONFIG_NETSESSION
				if (skb->ns && NS_DIR_REPLY == NSINFO2DIR(skb->nsinfo) && 
					!(((struct net_session *)skb->ns)->ff_info[NS_DIR_ORIGINAL].flag & NS_FF_LINUX)){
					err = 0;
	                goto route_label;
				}
				#endif
            }
            if (0 != err)
            {
                sessfw_debug(2, "--- lookup route ---\n" TUPLE_INFO(
                   &((struct ff_cache_info_ex*)skb->nfct)->ct.tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                   &((struct ff_cache_info_ex*)skb->nfct)->ct.tuplehash[IP_CT_DIR_REPLY].tuple));
                err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
                         skb->dev);                
            }
			
            
route_label:
            if (unlikely(err)) {
                if (err == -EHOSTUNREACH)
                    IP_INC_STATS_BH(if_dev_vrf(skb->dev), IPSTATS_MIB_INADDRERRORS);
                goto drop;
            }
        }

        if (iph->ihl > 5 && ip_rcv_options(skb))
    	{
			goto drop;
    	}
            //goto drop;

        
        /*A new entry of mcast packet. add by wangdi*/
        if (MULTICAST(iph->daddr))
        {
            return ipv4_mcast_rcv(skb, iph);
        }
		else if (g_session_forward_enable)
		{
			s32 ret;
			ret = session_forward_fix[ns_mode](skb);
			if (ret != 2)
				return ret;
			else
				return dst_input(skb);	/* no session-forwarding match; use the normal input path */
		}
        else
            return dst_input(skb);

    drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}
Example #18
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */

	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 sha, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}
Example #19
int ip_build_xmit(struct sock *sk,
		   void getfrag (const void *,
				 __u32,
				 char *,
				 unsigned int,	
				 unsigned int),
		   const void *frag,
		   unsigned short int length,
		   __u32 daddr,
		   __u32 user_saddr,
		   struct options * opt,
		   int flags,
		   int type,
		   int noblock) 
{
	struct rtable *rt;
	unsigned int fraglen, maxfraglen, fragheaderlen;
	int offset, mf;
	__u32 saddr;
	unsigned short id;
	struct iphdr *iph;
	__u32 raddr;
	struct device *dev = NULL;
	struct hh_cache * hh=NULL;
	int nfrags=0;
	__u32 true_daddr = daddr;

	if (opt && opt->srr && !sk->ip_hdrincl)
	  daddr = opt->faddr;
	
	ip_statistics.IpOutRequests++;

#ifdef CONFIG_IP_MULTICAST	
	if(MULTICAST(daddr) && *sk->ip_mc_name)
	{
		dev=dev_get(sk->ip_mc_name);
		if(!dev)
			return -ENODEV;
		rt=NULL;
		if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
			saddr = sk->saddr;
		else
			saddr = dev->pa_addr;
	}
	else
	{
#endif	
		rt = ip_check_route(&sk->ip_route_cache, daddr,
				    sk->localroute || (flags&MSG_DONTROUTE) ||
				    (opt && opt->is_strictroute), sk->bound_device);
		if (rt == NULL) 
		{
			ip_statistics.IpOutNoRoutes++;
			return(-ENETUNREACH);
		}
		saddr = rt->rt_src;

		hh = rt->rt_hh;
	
		if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
			saddr = sk->saddr;
			
		dev=rt->rt_dev;
#ifdef CONFIG_IP_MULTICAST
	}
	if (rt && !dev)
		dev = rt->rt_dev;
#endif		
	if (user_saddr)
		saddr = user_saddr;

	raddr = rt ? rt->rt_gateway : daddr;
	/*
	 *	Now compute the buffer space we require
	 */ 
	 
	/*
	 *	Try the simple case first. This leaves broadcast, multicast, fragmented frames, and by
	 *	choice RAW frames within 20 bytes of maximum size(rare) to the long path
	 */

	if (!sk->ip_hdrincl) {
		length += sizeof(struct iphdr);
		if (opt) {
			/* make sure not to exceed maximum packet size */
			if (0xffff - length < opt->optlen)
				return -EMSGSIZE;
			length += opt->optlen;
		}
	}

	if(length <= dev->mtu && !MULTICAST(daddr) && daddr!=0xFFFFFFFF && daddr!=dev->pa_brdaddr)
	{	
		int error;
		struct sk_buff *skb=sock_alloc_send_skb(sk, length+15+dev->hard_header_len,0, noblock, &error);
		if(skb==NULL)
		{
			ip_statistics.IpOutDiscards++;
			return error;
		}
		skb->dev=dev;
		skb->protocol = htons(ETH_P_IP);
		skb->free=1;
		skb->when=jiffies;
		skb->sk=sk;
		skb->arp=0;
		skb->saddr=saddr;
		skb->raddr = raddr;
		skb_reserve(skb,(dev->hard_header_len+15)&~15);
		if (hh)
		{
			skb->arp=1;
			memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len);
			if (!hh->hh_uptodate)
			{
				skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
				printk("ip_build_xmit: hh miss %08x via %08x\n", rt->rt_dst, rt->rt_gateway);
#endif				
			}
		}
		else if(dev->hard_header)
		{
			if(dev->hard_header(skb,dev,ETH_P_IP,NULL,NULL,0)>0)
				skb->arp=1;
		}
		else
			skb->arp=1;
		skb->ip_hdr=iph=(struct iphdr *)skb_put(skb,length);
		dev_lock_list();
		if(!sk->ip_hdrincl)
		{
			iph->version=4;
			iph->ihl=5;
			iph->tos=sk->ip_tos;
			iph->tot_len = htons(length);
			iph->id=htons(ip_id_count++);
			iph->frag_off = 0;
			iph->ttl=sk->ip_ttl;
			iph->protocol=type;
			iph->saddr=saddr;
			iph->daddr=daddr;
			if (opt) 
			{
				iph->ihl += opt->optlen>>2;
				ip_options_build(skb, opt,
						 true_daddr, dev->pa_addr, 0);
			}
			iph->check=0;
			iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
			getfrag(frag,saddr,((char *)iph)+iph->ihl*4,0, length-iph->ihl*4);
		}
Example #20
static int raw_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
	struct ipcm_cookie ipc;
	struct rawfakehdr rfh;
	struct rtable *rt = NULL;
	int free = 0;
	u32 daddr;
	u8  tos;
	int err;

	/* This check is ONLY to check for arithmetic overflow
	   on integer(!) len. Not more! Real check will be made
	   in ip_build_xmit --ANK

	   BTW socket.c -> af_*.c -> ... make multiple
	   invalid conversions size_t -> int. We MUST repair it f.e.
	   by replacing all of them with size_t and revise all
	   the places sort of len += sizeof(struct iphdr)
	   If len was ULONG_MAX-10 it would be cathastrophe  --ANK
	 */

	err = -EMSGSIZE;
	if (len < 0 || len > 0xFFFF)
		goto out;

	/*
	 *	Check the flags.
	 */

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message */
		goto out;               /* compatibility */
			 
	/*
	 *	Get and verify the address. 
	 */

	if (msg->msg_namelen) {
		struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(*usin))
			goto out;
		if (usin->sin_family != AF_INET) {
			static int complained;
			if (!complained++)
				printk(KERN_INFO "%s forgot to set AF_INET in "
						 "raw sendmsg. Fix it!\n",
						 current->comm);
			err = -EINVAL;
			if (usin->sin_family)
				goto out;
		}
		daddr = usin->sin_addr.s_addr;
		/* ANK: I did not forget to get protocol from port field.
		 * I just do not know, who uses this weirdness.
		 * IP_HDRINCL is much more convenient.
		 */
	} else {
		err = -EDESTADDRREQ;
		if (sk->state != TCP_ESTABLISHED) 
			goto out;
		daddr = sk->daddr;
	}

	ipc.addr = sk->saddr;
	ipc.opt = NULL;
	ipc.oif = sk->bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			goto out;
		if (ipc.opt)
			free = 1;
	}

	rfh.saddr = ipc.addr;
	ipc.addr = daddr;

	if (!ipc.opt)
		ipc.opt = sk->protinfo.af_inet.opt;

	if (ipc.opt) {
		err = -EINVAL;
		/* Linux does not mangle headers on raw sockets,
		 * so that IP options + IP_HDRINCL is non-sense.
		 */
		if (sk->protinfo.af_inet.hdrincl)
			goto done;
		if (ipc.opt->srr) {
			if (!daddr)
				goto done;
			daddr = ipc.opt->faddr;
		}
	}
	tos = RT_TOS(sk->protinfo.af_inet.tos) | sk->localroute;
	if (msg->msg_flags & MSG_DONTROUTE)
		tos |= RTO_ONLINK;

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = sk->protinfo.af_inet.mc_index;
		if (!rfh.saddr)
			rfh.saddr = sk->protinfo.af_inet.mc_addr;
	}

	err = ip_route_output(&rt, daddr, rfh.saddr, tos, ipc.oif);

	if (err)
		goto done;

	err = -EACCES;
	if (rt->rt_flags & RTCF_BROADCAST && !sk->broadcast)
		goto done;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	rfh.iov		= msg->msg_iov;
	rfh.saddr	= rt->rt_src;
	rfh.dst		= &rt->u.dst;
	if (!ipc.addr)
		ipc.addr = rt->rt_dst;
	err = ip_build_xmit(sk, sk->protinfo.af_inet.hdrincl ? raw_getrawfrag :
		       	    raw_getfrag, &rfh, len, &ipc, rt, msg->msg_flags);

done:
	if (free)
		kfree(ipc.opt);
	ip_rt_put(rt);

out:	return err < 0 ? err : len;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
Example #21
/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 */
static int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct sock *sk;
			int size;

			sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (!sk) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			do {
				if (++size >= best_size_so_far)
					goto next;
			} while ((sk = sk->next) != NULL);
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for(;; result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
gotit:
		udp_port_rover = snum = result;
	} else {
		struct sock *sk2;
		int sk_reuse, sk2_reuse;
		int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr),
		    addr_type2;
#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
		uid_t sk_uid = sock_i_uid_t(sk),
		      sk2_uid;
#endif

		sk_reuse = 0;
		if (sk->reuse)
			sk_reuse |= 1;
#ifdef SO_REUSEPORT
		if (sk->reuseport)
			sk_reuse |= 2;
#endif
		if (sk_reuse &&
		    (addr_type != IPV6_ADDR_MAPPED ? (addr_type & IPV6_ADDR_MULTICAST) : MULTICAST(sk->rcv_saddr)))
			sk_reuse |= 4;

		for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
		     sk2 != NULL;
		     sk2 = sk2->next) {
#if 1	/* XXX: should be recoded like 2.4.21 */
#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
			int uid_ok;
#endif
			int both_specified = 0;

			if (sk2->num != snum ||
			    sk2 == sk ||
			    (sk2->bound_dev_if && sk->bound_dev_if &&
			     sk2->bound_dev_if != sk->bound_dev_if))
				continue;
#if 0
			if (sk2->family != AF_INET6 && sk2->family != AF_INET)
				continue;
#endif

			addr_type2 = sk2->family == AF_INET6 ? ipv6_addr_type(&sk2->net_pinfo.af_inet6.rcv_saddr) : IPV6_ADDR_MAPPED;
#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
			sk2_uid = sock_i_uid_t(sk2);
#endif

			if ((addr_type2 != IPV6_ADDR_MAPPED ? addr_type2 != IPV6_ADDR_ANY : sk2->rcv_saddr) &&
			    (addr_type != IPV6_ADDR_MAPPED ? addr_type != IPV6_ADDR_ANY : sk->rcv_saddr)) {
				if (addr_type2 == IPV6_ADDR_MAPPED || addr_type == IPV6_ADDR_MAPPED) {
					if (addr_type2 != addr_type ||
					    sk2->rcv_saddr != sk->rcv_saddr)
						continue;
				} else {
					if (ipv6_addr_cmp(&sk2->net_pinfo.af_inet6.rcv_saddr,
							  &sk->net_pinfo.af_inet6.rcv_saddr))
						continue;
				}
				both_specified = 1;
			}

#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
			uid_ok = sk2_uid == (uid_t) -1 || sk_uid == sk2_uid;
#endif

			if ((addr_type2 == IPV6_ADDR_MAPPED && 
			     addr_type != IPV6_ADDR_MAPPED && sk->net_pinfo.af_inet6.ipv6only) ||
			    (addr_type == IPV6_ADDR_MAPPED &&
			     addr_type2 != IPV6_ADDR_MAPPED && sk2->net_pinfo.af_inet6.ipv6only)) {
#ifdef CONFIG_IPV6_RESTRICTED_DOUBLE_BIND
				if (sysctl_ipv6_bindv6only_restriction == 0 || uid_ok)
					continue;
#else
				continue;
#endif
			}

			sk2_reuse = 0;
			if (sk2->reuse)
				sk2_reuse |= 1;
#ifdef SO_REUSEPORT
			if (sk2->reuseport)
				sk2_reuse |= 2;
#endif
			if (sk2_reuse &&
			    (addr_type2 != IPV6_ADDR_MAPPED ? (addr_type2 & IPV6_ADDR_MULTICAST) : MULTICAST(sk2->rcv_saddr)))
				sk2_reuse |= 4;

			if (sk2_reuse & sk_reuse & 3) {	/* NOT && */
				if (sk2_reuse & sk_reuse & 4)
					continue;
#ifdef CONFIG_NET_RESTRICTED_REUSE
				if (!uid_ok)
					goto fail;
#endif
#ifdef SO_REUSEPORT
				if (sk2_reuse & sk_reuse & 2)
					continue;
#endif
				if (both_specified) {
					int addr_type2d = sk2->family == AF_INET6 ? ipv6_addr_type(&sk2->net_pinfo.af_inet6.daddr) : IPV6_ADDR_MAPPED;
					if (addr_type2d != IPV6_ADDR_MAPPED ? addr_type2d != IPV6_ADDR_ANY : sk2->daddr)
						continue;
				} else {
					if ((addr_type2 != IPV6_ADDR_MAPPED ? addr_type2 != IPV6_ADDR_ANY : sk2->rcv_saddr) || 
					    (addr_type != IPV6_ADDR_MAPPED ? addr_type != IPV6_ADDR_ANY : sk->rcv_saddr))
						continue;
				}
			}
			goto fail;
#else	/* XXX: should be recoded like 2.4.21 */
			if (sk2->num == snum &&
			    sk2 != sk &&
			    (!sk2->bound_dev_if ||
			     !sk->bound_dev_if ||
			     sk2->bound_dev_if == sk->bound_dev_if) &&
			    ((!sk2->rcv_saddr && !ipv6_only_sock(sk)) ||
			     (sk2->family == AF_INET6 && 
			      ipv6_addr_any(&sk2->net_pinfo.af_inet6.rcv_saddr) &&
			      !(ipv6_only_sock(sk2) && addr_type == IPV6_ADDR_MAPPED)) ||
			     (addr_type == IPV6_ADDR_ANY && 
			      (!ipv6_only_sock(sk) || 
			       !(sk2->family == AF_INET6 ? (ipv6_addr_type(&sk2->net_pinfo.af_inet6.rcv_saddr) == IPV6_ADDR_MAPPED) : 1))) ||
			     (sk2->family == AF_INET6 && 
			      !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
					     &sk2->net_pinfo.af_inet6.rcv_saddr)) ||
			     (addr_type == IPV6_ADDR_MAPPED &&
			      !ipv6_only_sock(sk2) &&
			      (!sk2->rcv_saddr || 
			       !sk->rcv_saddr ||
			       sk->rcv_saddr == sk2->rcv_saddr))) &&
			    (!sk2->reuse || !sk->reuse))
				goto fail;
#endif	/* XXX: should be recoded like 2.4.21 */
		}
	}

	sk->num = snum;
	if (sk->pprev == NULL) {
		struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
		if ((sk->next = *skp) != NULL)
			(*skp)->pprev = &sk->next;
		*skp = sk;
		sk->pprev = skp;
		sock_prot_inc_use(sk->prot);
		sock_hold(sk);
	}
	write_unlock_bh(&udp_hash_lock);
	return 0;

fail:
	write_unlock_bh(&udp_hash_lock);
	return 1;
}
Example #22
void ssa_nodse_stat_entrance(struct sk_buff *skb, u32 dir)
{
    u32 packet_len = (u32)ntohs(skb->nh.iph->tot_len) + SSA_MAC_HEAD_LENGTH;
    s32 in_if_num;
    s32 out_if_num;
    u32 src_ip = skb->nh.iph->saddr;
    u32 dst_ip = skb->nh.iph->daddr;
    u32 dst_match_flag = 0;
    u32 src_match_flag = 0;
    u32 app_id = skb_get_application_id(skb);
    s32 i;
	struct net_device dev;
	u32 dev_type = 0;

    if (app_id >= SSA_APP_NUM_MAX)
    {
        app_id = 127;
    }
    
    if(LOOPBACK(src_ip) || MULTICAST(src_ip) || BADCLASS(src_ip) || ZERONET(src_ip) || LOCAL_MCAST(src_ip) || BROADCAST(src_ip) ||
          LOOPBACK(dst_ip) || MULTICAST(dst_ip) || BADCLASS(dst_ip) || ZERONET(dst_ip) || LOCAL_MCAST(dst_ip) || BROADCAST(dst_ip))
    {
        return ;
    }
    
    if (SSA_ON==g_ssa_conf.if_log_switch)
    {    	
		in_if_num = if_dev_get_phy_serial_num(skb->in_if);
		
		dev.ifindex = skb->in_if;
		eth_dev_ioctl( &dev, (void *)&dev_type, SIOCGPRIVATEIFTYPE);		
		if(dir == STREAM_IN||DEV_IFTYPE_SNIFF == dev_type)
    	{
	    	ssa_if_stat_entrance(packet_len, in_if_num, app_id, STREAM_IN);
			if (SSA_ON==g_ssa_firstpage_info.stat_flag)
			{				
				ssa_firstpage_data_stat(packet_len, in_if_num, STREAM_IN);
			}
    	}
		else
		{
			out_if_num = if_dev_get_phy_serial_num(skb->out_if);			
	    	ssa_if_stat_entrance(packet_len, in_if_num,  app_id, STREAM_IN);
	    	ssa_if_stat_entrance(packet_len, out_if_num, app_id, STREAM_OUT);
	    	if (SSA_ON==g_ssa_firstpage_info.stat_flag)
			{					
				ssa_firstpage_data_stat(packet_len, in_if_num, STREAM_IN);
				ssa_firstpage_data_stat(packet_len, out_if_num, STREAM_OUT);
			}
		}
    }


    if (g_ssa_conf.ip_log_switch == SSA_ON && dir == STREAM_OUT)
    {
        for(i = 0; (i < SSA_USER_GROUP_NUM)&&(g_ssa_conf.stat_group_id[i] != 0); i++)
        {
            if(1 == dst_match_flag && 1 == src_match_flag)
            {
                return;
            }
         
            if((0 == src_match_flag) && (net_user_ip_in_list == net_user_get_by_id_ip(g_ssa_conf.stat_group_id[i], src_ip)))
            {              
                src_match_flag = 1;
             
                ssa_ip_stat_entrance(packet_len, 1, 0, 0, src_ip, g_ssa_conf.stat_group_id[i], app_id, SSA_STREAM_UP, NULL);
            }
        
			if((0 == dst_match_flag) && (net_user_ip_in_list == net_user_get_by_id_ip(g_ssa_conf.stat_group_id[i], dst_ip)))
			{
				dst_match_flag = 1;			 
				ssa_ip_stat_entrance(packet_len, 1, 0, 0, dst_ip, g_ssa_conf.stat_group_id[i], app_id, SSA_STREAM_DOWN, NULL);
			}
        }

    }
    
    return;

}