Example #1
0
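/* Copy the timestamp of the last packet received on @sk out to a 32-bit
 * (compat) task as a struct compat_timespec, enabling timestamping on
 * first use; compat tasks using 64-bit time take the native path. */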
int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct compat_timespec __user *ctv;
	int err;
	struct timespec ts;

	if (COMPAT_USE_64BIT_TIME)
		return sock_get_timestampns(sk, userstamp);

	ctv = (struct compat_timespec __user *) userstamp;
	err = -ENOENT;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return err;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	err = 0;
	if (put_user(ts.tv_sec, &ctv->tv_sec) ||
			put_user(ts.tv_nsec, &ctv->tv_nsec))
		err = -EFAULT;
	return err;
}
Example #2
0
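/* Older variant: sk->sk_stamp is still a struct timeval here, so it is
 * copied out field by field as a struct compat_timeval. */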
int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct compat_timeval __user *ctv =
			(struct compat_timeval __user *) userstamp;
	int err = -ENOENT;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	if (sk->sk_stamp.tv_sec == -1)
		return err;
	if (sk->sk_stamp.tv_sec == 0)
		do_gettimeofday(&sk->sk_stamp);
	err = 0;
	if (put_user(sk->sk_stamp.tv_sec, &ctv->tv_sec) ||
			put_user(sk->sk_stamp.tv_usec, &ctv->tv_usec))
		err = -EFAULT;
	return err;
}
Example #3
0
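/* Later variant of the same helper: sk->sk_stamp is now a ktime_t and is
 * converted with ktime_to_timeval() before the copy-out. */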
int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct compat_timeval __user *ctv =
			(struct compat_timeval __user *) userstamp;
	int err = -ENOENT;
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return err;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	err = 0;
	if (put_user(tv.tv_sec, &ctv->tv_sec) ||
			put_user(tv.tv_usec, &ctv->tv_usec))
		err = -EFAULT;
	return err;
}
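These three helpers service the SIOCGSTAMP and SIOCGSTAMPNS socket ioctls for 32-bit tasks. A minimal userspace sketch of the calling side, assuming fd is a socket that has already received at least one packet (the helper name is illustrative and error handling is elided):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <time.h>
#include <linux/sockios.h>	/* SIOCGSTAMP, SIOCGSTAMPNS */

/* Read back the timestamp of the last packet received on fd. */
static void print_last_rx_stamp(int fd)
{
	struct timeval tv;
	struct timespec ts;

	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)	/* microsecond resolution */
		printf("last rx: %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	if (ioctl(fd, SIOCGSTAMPNS, &ts) == 0)	/* nanosecond resolution */
		printf("last rx: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
}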
Example #4
0
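/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */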
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct sk_filter *filter;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;
	
	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif
	
	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
		case SO_DEBUG:
			if (val && !capable(CAP_NET_ADMIN))
				ret = -EACCES;
			else if (valbool)
				sock_set_flag(sk, SOCK_DBG);
			else
				sock_reset_flag(sk, SOCK_DBG);
			break;
		case SO_REUSEADDR:
			sk->sk_reuse = valbool;
			break;
		case SO_TYPE:
		case SO_ERROR:
			ret = -ENOPROTOOPT;
			break;
		case SO_DONTROUTE:
			if (valbool)
				sock_set_flag(sk, SOCK_LOCALROUTE);
			else
				sock_reset_flag(sk, SOCK_LOCALROUTE);
			break;
		case SO_BROADCAST:
			sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
			break;
		case SO_SNDBUF:
			/* Don't error on this BSD doesn't and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			   
			if (val > sysctl_wmem_max)
				val = sysctl_wmem_max;
set_sndbuf:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			if ((val * 2) < SOCK_MIN_SNDBUF)
				sk->sk_sndbuf = SOCK_MIN_SNDBUF;
			else
				sk->sk_sndbuf = val * 2;

			/*
			 *	Wake up sending tasks if we
			 *	upped the value.
			 */
			sk->sk_write_space(sk);
			break;

		case SO_SNDBUFFORCE:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			goto set_sndbuf;

		case SO_RCVBUF:
			/* Don't error on this BSD doesn't and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			  
			if (val > sysctl_rmem_max)
				val = sysctl_rmem_max;
set_rcvbuf:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			/*
			 * We double it on the way in to account for
			 * "struct sk_buff" etc. overhead.   Applications
			 * assume that the SO_RCVBUF setting they make will
			 * allow that much actual data to be received on that
			 * socket.
			 *
			 * Applications are unaware that "struct sk_buff" and
			 * other overheads allocate from the receive buffer
			 * during socket buffer allocation.
			 *
			 * And after considering the possible alternatives,
			 * returning the value we actually used in getsockopt
			 * is the most desirable behavior.
			 */
			if ((val * 2) < SOCK_MIN_RCVBUF)
				sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
			else
				sk->sk_rcvbuf = val * 2;
			break;

		case SO_RCVBUFFORCE:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			goto set_rcvbuf;

		case SO_KEEPALIVE:
#ifdef CONFIG_INET
			if (sk->sk_protocol == IPPROTO_TCP)
				tcp_set_keepalive(sk, valbool);
#endif
			sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
			break;

	 	case SO_OOBINLINE:
			sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
			break;

	 	case SO_NO_CHECK:
			sk->sk_no_check = valbool;
			break;

		case SO_PRIORITY:
			if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 
				sk->sk_priority = val;
			else
				ret = -EPERM;
			break;

		case SO_LINGER:
			if (optlen < sizeof(ling)) {
				ret = -EINVAL;	/* 1003.1g */
				break;
			}
			if (copy_from_user(&ling, optval, sizeof(ling))) {
				ret = -EFAULT;
				break;
			}
			if (!ling.l_onoff)
				sock_reset_flag(sk, SOCK_LINGER);
			else {
#if (BITS_PER_LONG == 32)
				if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
					sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
				else
#endif
					sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
				sock_set_flag(sk, SOCK_LINGER);
			}
			break;

		case SO_BSDCOMPAT:
			sock_warn_obsolete_bsdism("setsockopt");
			break;

		case SO_PASSCRED:
			if (valbool)
				set_bit(SOCK_PASSCRED, &sock->flags);
			else
				clear_bit(SOCK_PASSCRED, &sock->flags);
			break;

		case SO_TIMESTAMP:
			if (valbool)  {
				sock_set_flag(sk, SOCK_RCVTSTAMP);
				sock_enable_timestamp(sk);
			} else
				sock_reset_flag(sk, SOCK_RCVTSTAMP);
			break;

		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->sk_rcvlowat = val ? : 1;
			break;

		case SO_RCVTIMEO:
			ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
			break;

		case SO_SNDTIMEO:
			ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
			break;

#ifdef CONFIG_NETDEVICES
		case SO_BINDTODEVICE:
		{
			char devname[IFNAMSIZ]; 

			/* Sorry... */ 
			if (!capable(CAP_NET_RAW)) {
				ret = -EPERM;
				break;
			}

			/* Bind this socket to a particular device like "eth0",
			 * as specified in the passed interface name. If the
			 * name is "" or the option length is zero the socket 
			 * is not bound. 
			 */ 

			if (!valbool) {
				sk->sk_bound_dev_if = 0;
			} else {
				if (optlen > IFNAMSIZ - 1)
					optlen = IFNAMSIZ - 1;
				memset(devname, 0, sizeof(devname));
				if (copy_from_user(devname, optval, optlen)) {
					ret = -EFAULT;
					break;
				}

				/* Remove any cached route for this socket. */
				sk_dst_reset(sk);

				if (devname[0] == '\0') {
					sk->sk_bound_dev_if = 0;
				} else {
					struct net_device *dev = dev_get_by_name(devname);
					if (!dev) {
						ret = -ENODEV;
						break;
					}
					sk->sk_bound_dev_if = dev->ifindex;
					dev_put(dev);
				}
			}
			break;
		}
#endif


		case SO_ATTACH_FILTER:
			ret = -EINVAL;
			if (optlen == sizeof(struct sock_fprog)) {
				struct sock_fprog fprog;

				ret = -EFAULT;
				if (copy_from_user(&fprog, optval, sizeof(fprog)))
					break;

				ret = sk_attach_filter(&fprog, sk);
			}
			break;

		case SO_DETACH_FILTER:
			spin_lock_bh(&sk->sk_lock.slock);
			filter = sk->sk_filter;
			if (filter) {
				sk->sk_filter = NULL;
				spin_unlock_bh(&sk->sk_lock.slock);
				sk_filter_release(sk, filter);
				break;
			}
			spin_unlock_bh(&sk->sk_lock.slock);
			ret = -ENONET;
			break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
		default:
			ret = -ENOPROTOOPT;
			break;
	}
	release_sock(sk);
	return ret;
}
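The doubling in the SO_RCVBUF/SO_SNDBUF cases is observable from userspace, since getsockopt() reports the value the kernel actually stored. A minimal sketch, assuming fd is any open socket, the request is below net.core.rmem_max, and the helper name is illustrative:

#include <stdio.h>
#include <sys/socket.h>

static void show_rcvbuf_doubling(int fd)
{
	int req = 64 * 1024, got = 0;
	socklen_t len = sizeof(got);

	/* The kernel stores twice the request (capped by rmem_max) to
	 * account for struct sk_buff overhead, per the comment above. */
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
	printf("requested %d, kernel reports %d\n", req, got);	/* ~131072 */
}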
Example #5
0
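/* Receive callback for PACKET_RX_RING sockets: copy the packet into the
 * next free frame of the ring shared with userspace via mmap() and mark
 * the frame TP_STATUS_USER so the consumer can pick it up. */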
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev->hard_header) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
			if (skb->ip_summed == CHECKSUM_HW)
				status |= TP_STATUS_CSUMNOTREADY;
		}
	}

	snaplen = skb->len;

	if (sk->sk_filter) {
		unsigned res = run_filter(skb, sk, snaplen);
		if (res == 0)
			goto drop_n_restore;
		if (snaplen > res)
			snaplen = res;
	}

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb->nh.raw - skb->data;
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}
	if (snaplen > skb->len-skb->data_len)
		snaplen = skb->len-skb->data_len;

	spin_lock(&sk->sk_receive_queue.lock);
	h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
	
	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	memcpy((u8 *)h + macoff, skb->data, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->stamp.tv_sec == 0) { 
		do_gettimeofday(&skb->stamp);
		sock_enable_timestamp(sk);
	}
	h->tp_sec = skb->stamp.tv_sec;
	h->tp_usec = skb->stamp.tv_usec;

	sll = (struct sockaddr_ll *)((u8 *)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;
	mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
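For context, this function fills frames consumed by an mmap()ed PACKET_RX_RING in userspace. A minimal sketch of such a consumer for the classic TPACKET_V1 layout; the ring geometry is illustrative, error handling is elided, and CAP_NET_RAW is required:

#include <poll.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <arpa/inet.h>		/* htons */
#include <linux/if_ether.h>	/* ETH_P_ALL */
#include <linux/if_packet.h>	/* tpacket_req, tpacket_hdr, TP_STATUS_* */

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct tpacket_req req = {
		.tp_block_size = 4096,	/* multiple of the page size */
		.tp_frame_size = 2048,	/* multiple of TPACKET_ALIGNMENT */
		.tp_block_nr   = 64,
		.tp_frame_nr   = 128,	/* (block_size / frame_size) * block_nr */
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (unsigned int i = 0; ; i = (i + 1) % req.tp_frame_nr) {
		struct tpacket_hdr *h =
			(struct tpacket_hdr *)(ring + (size_t)i * req.tp_frame_size);

		while (!(h->tp_status & TP_STATUS_USER))	/* set by tpacket_rcv() */
			poll(&pfd, 1, -1);
		printf("packet: len=%u snaplen=%u\n", h->tp_len, h->tp_snaplen);
		/* payload starts at (char *)h + h->tp_mac */
		h->tp_status = TP_STATUS_KERNEL;	/* hand the frame back */
	}
}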