/**
 * dump_common_audit_data - helper to dump common audit data
 * @ab: audit buffer to write the record into
 * @a: common audit data
 *
 */
static void dump_common_audit_data(struct audit_buffer *ab,
                                   struct common_audit_data *a)
{
    struct task_struct *tsk = current;

    audit_log_format(ab, " pid=%d comm=", tsk->pid);
    audit_log_untrustedstring(ab, tsk->comm);

    switch (a->type) {
    case LSM_AUDIT_DATA_NONE:
        return;
    case LSM_AUDIT_DATA_IPC:
        audit_log_format(ab, " key=%d ", a->u.ipc_id);
        break;
    case LSM_AUDIT_DATA_CAP:
        audit_log_format(ab, " capability=%d ", a->u.cap);
        break;
    case LSM_AUDIT_DATA_PATH: {
        struct inode *inode;

        audit_log_d_path(ab, " path=", &a->u.path);

        inode = a->u.path.dentry->d_inode;
        if (inode) {
            audit_log_format(ab, " dev=");
            audit_log_untrustedstring(ab, inode->i_sb->s_id);
            audit_log_format(ab, " ino=%lu", inode->i_ino);
        }
        break;
    }
    case LSM_AUDIT_DATA_IOCTL_OP: {
        struct inode *inode;

        audit_log_d_path(ab, " path=", &a->u.op->path);

        inode = a->u.op->path.dentry->d_inode;
        if (inode) {
            audit_log_format(ab, " dev=");
            audit_log_untrustedstring(ab, inode->i_sb->s_id);
            audit_log_format(ab, " ino=%lu", inode->i_ino);
        }

        audit_log_format(ab, " ioctlcmd=%hx", a->u.op->cmd);
        break;
    }
    case LSM_AUDIT_DATA_DENTRY: {
        struct inode *inode;

        audit_log_format(ab, " name=");
        audit_log_untrustedstring(ab, a->u.dentry->d_name.name);

        inode = a->u.dentry->d_inode;
        if (inode) {
            audit_log_format(ab, " dev=");
            audit_log_untrustedstring(ab, inode->i_sb->s_id);
            audit_log_format(ab, " ino=%lu", inode->i_ino);
        }
        break;
    }
    case LSM_AUDIT_DATA_INODE: {
        struct dentry *dentry;
        struct inode *inode;

        inode = a->u.inode;
        dentry = d_find_alias(inode);
        if (dentry) {
            audit_log_format(ab, " name=");
            audit_log_untrustedstring(ab,
                                      dentry->d_name.name);
            dput(dentry);
        }
        audit_log_format(ab, " dev=");
        audit_log_untrustedstring(ab, inode->i_sb->s_id);
        audit_log_format(ab, " ino=%lu", inode->i_ino);
        break;
    }
    case LSM_AUDIT_DATA_TASK:
        tsk = a->u.tsk;
        if (tsk && tsk->pid) {
            audit_log_format(ab, " pid=%d comm=", tsk->pid);
            audit_log_untrustedstring(ab, tsk->comm);
        }
        break;
    case LSM_AUDIT_DATA_NET:
        if (a->u.net->sk) {
            struct sock *sk = a->u.net->sk;
            struct unix_sock *u;
            int len = 0;
            char *p = NULL;

            switch (sk->sk_family) {
            case AF_INET: {
                struct inet_sock *inet = inet_sk(sk);

                print_ipv4_addr(ab, inet->inet_rcv_saddr,
                                inet->inet_sport,
                                "laddr", "lport");
                print_ipv4_addr(ab, inet->inet_daddr,
                                inet->inet_dport,
                                "faddr", "fport");
                break;
            }
            case AF_INET6: {
                struct inet_sock *inet = inet_sk(sk);
                struct ipv6_pinfo *inet6 = inet6_sk(sk);

                print_ipv6_addr(ab, &inet6->rcv_saddr,
                                inet->inet_sport,
                                "laddr", "lport");
                print_ipv6_addr(ab, &inet6->daddr,
                                inet->inet_dport,
                                "faddr", "fport");
                break;
            }
            case AF_UNIX:
                u = unix_sk(sk);
                if (u->path.dentry) {
                    audit_log_d_path(ab, " path=", &u->path);
                    break;
                }
                if (!u->addr)
                    break;
                len = u->addr->len-sizeof(short);
                p = &u->addr->name->sun_path[0];
                audit_log_format(ab, " path=");
                if (*p)
                    audit_log_untrustedstring(ab, p);
                else
                    audit_log_n_hex(ab, p, len);
                break;
            }
        }

        switch (a->u.net->family) {
        case AF_INET:
            print_ipv4_addr(ab, a->u.net->v4info.saddr,
                            a->u.net->sport,
                            "saddr", "src");
            print_ipv4_addr(ab, a->u.net->v4info.daddr,
                            a->u.net->dport,
                            "daddr", "dest");
            break;
        case AF_INET6:
            print_ipv6_addr(ab, &a->u.net->v6info.saddr,
                            a->u.net->sport,
                            "saddr", "src");
            print_ipv6_addr(ab, &a->u.net->v6info.daddr,
                            a->u.net->dport,
                            "daddr", "dest");
            break;
        }
        if (a->u.net->netif > 0) {
            struct net_device *dev;

            /* NOTE: we always use init's namespace */
            dev = dev_get_by_index(&init_net, a->u.net->netif);
            if (dev) {
                audit_log_format(ab, " netif=%s", dev->name);
                dev_put(dev);
            }
        }
        break;
#ifdef CONFIG_KEYS
    case LSM_AUDIT_DATA_KEY:
        audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
        if (a->u.key_struct.key_desc) {
            audit_log_format(ab, " key_desc=");
            audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
        }
        break;
#endif
    case LSM_AUDIT_DATA_KMOD:
        audit_log_format(ab, " kmod=");
        audit_log_untrustedstring(ab, a->u.kmod_name);
        break;
    } /* switch (a->type) */
}
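
Every branch above is selected purely by a->type; an LSM never calls dump_common_audit_data() directly, it fills a struct common_audit_data and hands it to the common audit entry point. A minimal sketch of that calling convention (assuming the three-argument common_lsm_audit() of the same kernel generation; the callbacks are optional and the denial scenario is invented for illustration):

/* Hedged sketch: how an LSM hook might drive the dispatcher above. */
static void example_log_dentry_denial(struct dentry *dentry)
{
	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_DENTRY;	/* picks the dentry branch above */
	ad.u.dentry = dentry;

	/* pre/post callbacks may be NULL; an LSM usually passes its own
	 * helpers here to print subject/object labels around the common data. */
	common_lsm_audit(&ad, NULL, NULL);
}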
Example #2
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
	struct can_filter sfilter;         /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {

	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_user(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(&init_net, ro->ifindex);

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(dev, sk, &sfilter, 1);
			else
				err = raw_enable_filters(dev, sk, filter,
							 count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(dev, sk, ro->filter, ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count  = count;

 out_fil:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_user(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(&init_net, ro->ifindex);

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(dev, sk, err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(dev, sk, ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_user(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
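
From userspace, the CAN_RAW_FILTER branch above is reached through an ordinary setsockopt() on a SocketCAN raw socket; the kernel side then either copies a single filter into ro->dfilter or memdup_user()s the whole array. A minimal sketch of the userspace side (error handling omitted; the interface name "can0" and the filter values are arbitrary examples):

#include <sys/socket.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* Hedged sketch: install two receive filters on a CAN_RAW socket. */
int setup_can_filters(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_filter rfilter[2] = {
		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
		{ .can_id = 0x200, .can_mask = 0x700 },
	};
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	addr.can_ifindex = if_nametoindex("can0");
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	/* optlen must be a multiple of sizeof(struct can_filter), as
	 * checked at the top of the CAN_RAW_FILTER case above. */
	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
			  &rfilter, sizeof(rfilter));
}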
static int CVE_2010_3848_linux2_6_23_econet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
	struct net_device *dev;
	struct ec_addr addr;
	int err;
	unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
	struct sk_buff *skb;
	struct ec_cb *eb;
#endif
#ifdef CONFIG_ECONET_AUNUDP
	struct msghdr udpmsg;
	struct iovec iov[msg->msg_iovlen+1];
	struct aunhdr ah;
	struct sockaddr_in udpdest;
	__kernel_size_t size;
	int i;
	mm_segment_t oldfs;
#endif

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
		return -EINVAL;

	/*
	 *	Get and verify the address.
	 */

	mutex_lock(&econet_mutex);

	if (saddr == NULL) {
		struct econet_sock *eo = ec_sk(sk);

		addr.station = eo->station;
		addr.net     = eo->net;
		port	     = eo->port;
		cb	     = eo->cb;
	} else {
		if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
			mutex_unlock(&econet_mutex);
			return -EINVAL;
		}
		addr.station = saddr->addr.station;
		addr.net = saddr->addr.net;
		port = saddr->port;
		cb = saddr->cb;
	}

	/* Look for a device with the right network number. */
	dev = net2dev_map[addr.net];

	/* If not directly reachable, use some default */
	if (dev == NULL) {
		dev = net2dev_map[0];
		/* No interfaces at all? */
		if (dev == NULL) {
			mutex_unlock(&econet_mutex);
			return -ENETDOWN;
		}
	}

	if (len + 15 > dev->mtu) {
		mutex_unlock(&econet_mutex);
		return -EMSGSIZE;
	}

	if (dev->type == ARPHRD_ECONET) {
		/* Real hardware Econet.  We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
		unsigned short proto = 0;

		dev_hold(dev);

		skb = sock_alloc_send_skb(sk, len+LL_RESERVED_SPACE(dev),
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (skb==NULL)
			goto out_unlock;

		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);

		eb = (struct ec_cb *)&skb->cb;

		/* BUG: saddr may be NULL */
		eb->cookie = saddr->cookie;
		eb->sec = *saddr;
		eb->sent = ec_tx_done;

		if (dev->hard_header) {
			int res;
			struct ec_framehdr *fh;
			err = -EINVAL;
			res = dev->hard_header(skb, dev, ntohs(proto),
					       &addr, NULL, len);
			/* Poke in our control byte and
			   port number.  Hack, hack.  */
			fh = (struct ec_framehdr *)(skb->data);
			fh->cb = cb;
			fh->port = port;
			if (sock->type != SOCK_DGRAM) {
				skb_reset_tail_pointer(skb);
				skb->len = 0;
			} else if (res < 0)
				goto out_free;
		}

		/* Copy the data. Returns -EFAULT on error */
		err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
		skb->protocol = proto;
		skb->dev = dev;
		skb->priority = sk->sk_priority;
		if (err)
			goto out_free;

		err = -ENETDOWN;
		if (!(dev->flags & IFF_UP))
			goto out_free;

		/*
		 *	Now send it
		 */

		dev_queue_xmit(skb);
		dev_put(dev);
		mutex_unlock(&econet_mutex);
		return(len);

	out_free:
		kfree_skb(skb);
	out_unlock:
		if (dev)
			dev_put(dev);
#else
		err = -EPROTOTYPE;
#endif
		mutex_unlock(&econet_mutex);

		return err;
	}

#ifdef CONFIG_ECONET_AUNUDP
	/* AUN virtual Econet. */

	if (udpsock == NULL) {
		mutex_unlock(&econet_mutex);
		return -ENETDOWN;		/* No socket - can't send */
	}

	/* Make up a UDP datagram and hand it off to some higher intellect. */

	memset(&udpdest, 0, sizeof(udpdest));
	udpdest.sin_family = AF_INET;
	udpdest.sin_port = htons(AUN_PORT);

	/* At the moment we use the stupid Acorn scheme of Econet address
	   y.x maps to IP a.b.c.x.  This should be replaced with something
	   more flexible and more aware of subnet masks.  */
	{
		struct in_device *idev;
		unsigned long network = 0;

		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
		if (idev) {
			if (idev->ifa_list)
				network = ntohl(idev->ifa_list->ifa_address) &
					0xffffff00;		/* !!! */
		}
		rcu_read_unlock();
		udpdest.sin_addr.s_addr = htonl(network | addr.station);
	}

	ah.port = port;
	ah.cb = cb & 0x7f;
	ah.code = 2;		/* magic */
	ah.pad = 0;

	/* tack our header on the front of the iovec */
	size = sizeof(struct aunhdr);
	/*
	 * XXX: that is b0rken.  We can't mix userland and kernel pointers
	 * in iovec, since on a lot of platforms copy_from_user() will
	 * *not* work with the kernel and userland ones at the same time,
	 * regardless of what we do with set_fs().  And we are talking about
	 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
	 * apply.  Any suggestions on fixing that code?		-- AV
	 */
	iov[0].iov_base = (void *)&ah;
	iov[0].iov_len = size;
	for (i = 0; i < msg->msg_iovlen; i++) {
		void __user *base = msg->msg_iov[i].iov_base;
		size_t len = msg->msg_iov[i].iov_len;
		/* Check it now since we switch to KERNEL_DS later. */
		if (!access_ok(VERIFY_READ, base, len)) {
			mutex_unlock(&econet_mutex);
			return -EFAULT;
		}
		iov[i+1].iov_base = base;
		iov[i+1].iov_len = len;
		size += len;
	}

	/* Get a skbuff (no data, just holds our cb information) */
	if ((skb = sock_alloc_send_skb(sk, 0,
				       msg->msg_flags & MSG_DONTWAIT,
				       &err)) == NULL) {
		mutex_unlock(&econet_mutex);
		return err;
	}

	eb = (struct ec_cb *)&skb->cb;

	eb->cookie = saddr->cookie;
	eb->timeout = (5*HZ);
	eb->start = jiffies;
	ah.handle = aun_seq;
	eb->seq = (aun_seq++);
	eb->sec = *saddr;

	skb_queue_tail(&aun_queue, skb);

	udpmsg.msg_name = (void *)&udpdest;
	udpmsg.msg_namelen = sizeof(udpdest);
	udpmsg.msg_iov = &iov[0];
	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
	udpmsg.msg_control = NULL;
	udpmsg.msg_controllen = 0;
	udpmsg.msg_flags=0;

	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
	err = sock_sendmsg(udpsock, &udpmsg, size);
	set_fs(oldfs);
#else
	err = -EPROTOTYPE;
#endif
	mutex_unlock(&econet_mutex);

	return err;
}
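
Two separate problems are visible in the function above: the on-stack `struct iovec iov[msg->msg_iovlen+1]` array is sized directly from a user-controlled count (the overflow the CVE_2010_3848 label refers to), and the `/* BUG: saddr may be NULL */` comment marks dereferences of a possibly NULL msg_name. A hedged sketch of the kind of up-front validation later kernels apply, placed before any allocation or dereference (a paraphrase for illustration, not the verbatim upstream patches):

	/* Sketch only: bound the user-supplied iovec count before it is used
	 * to size an on-stack array, and reject a missing or short address
	 * before saddr->cookie / *saddr are touched further down. */
	if (msg->msg_iovlen >= UIO_MAXIOV)
		return -EMSGSIZE;
	if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec))
		return -EINVAL;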
/* PANid, channel, beacon_order = 15, superframe_order = 15,
 * PAN_coordinator, battery_life_extension = 0,
 * coord_realignment = 0, security_enable = 0
 */
int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct ieee802154_addr addr;

	u8 channel, bcn_ord, sf_ord;
	u8 page;
	int pan_coord, blx, coord_realign;
	int ret = -EBUSY;

	if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
	    !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
	    !info->attrs[IEEE802154_ATTR_CHANNEL] ||
	    !info->attrs[IEEE802154_ATTR_BCN_ORD] ||
	    !info->attrs[IEEE802154_ATTR_SF_ORD] ||
	    !info->attrs[IEEE802154_ATTR_PAN_COORD] ||
	    !info->attrs[IEEE802154_ATTR_BAT_EXT] ||
	    !info->attrs[IEEE802154_ATTR_COORD_REALIGN]
	 )
		return -EINVAL;

	dev = ieee802154_nl_get_dev(info);
	if (!dev)
		return -ENODEV;

	if (netif_running(dev))
		goto out;

	if (!ieee802154_mlme_ops(dev)->start_req) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	addr.mode = IEEE802154_ADDR_SHORT;
	addr.short_addr = nla_get_shortaddr(
			info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
	addr.pan_id = nla_get_shortaddr(
			info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);

	channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
	bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
	sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
	pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
	blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
	coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);

	if (info->attrs[IEEE802154_ATTR_PAGE])
		page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
	else
		page = 0;

	if (addr.short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
		ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
		dev_put(dev);
		return -EINVAL;
	}

	rtnl_lock();
	ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
		bcn_ord, sf_ord, pan_coord, blx, coord_realign);
	rtnl_unlock();

	/* FIXME: add validation for unused parameters to be sane
	 * for SoftMAC
	 */
	ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);

out:
	dev_put(dev);
	return ret;
}
int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev = NULL;
	int rc = -EINVAL;
	struct ieee802154_mlme_ops *ops;
	struct ieee802154_llsec_params params;
	int changed = 0;

	pr_debug("%s\n", __func__);

	dev = ieee802154_nl_get_dev(info);
	if (!dev)
		return -ENODEV;

	if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] &&
	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] &&
	    !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL])
		goto out;

	ops = ieee802154_mlme_ops(dev);
	if (!ops->llsec) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] &&
	    nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7)
		goto out;

	if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) {
		params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]);
		changed |= IEEE802154_LLSEC_PARAM_ENABLED;
	}

	if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) {
		if (ieee802154_llsec_parse_key_id(info, &params.out_key))
			goto out;

		changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
	}

	if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) {
		params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]);
		changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
	}

	if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) {
		u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);

		params.frame_counter = cpu_to_be32(fc);
		changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
	}

	rc = ops->llsec->set_params(dev, &params, changed);

	dev_put(dev);

	return rc;
out:
	dev_put(dev);
	return rc;
}
Example #6
static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &net->ipv4.vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(net, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (net->ipv4.mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;

	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && dev->ip_ptr == NULL) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);

		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		net->ipv4.mroute_reg_vif_num = vifi;
#endif
	if (vifi+1 > net->ipv4.maxvif)
		net->ipv4.maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
Example #7
static int __init kaodv_init(void)
{
	struct net_device *dev = NULL;
	struct in_device *indev;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;

	int i, ret = -ENOMEM;

#ifndef KERNEL26
	EXPORT_NO_SYMBOLS;
#endif

	kaodv_expl_init();

	ret = kaodv_queue_init();

	if (ret < 0)
		return ret;

	ret = kaodv_netlink_init();

	if (ret < 0)
		goto cleanup_queue;

	ret = nf_register_hook(&kaodv_ops[0]);

	if (ret < 0)
		goto cleanup_netlink;

	ret = nf_register_hook(&kaodv_ops[1]);

	if (ret < 0)
		goto cleanup_hook0;

	ret = nf_register_hook(&kaodv_ops[2]);

	if (ret < 0)
		goto cleanup_hook1;

	/* Prefetch network device info (ip, broadcast address, ifindex). */
	for (i = 0; i < MAX_INTERFACES; i++) {
		if (!ifname[i])
			break;
		dev = dev_get_by_name(ifname[i]);
		if (!dev) {
			printk("No device %s available, ignoring!\n",
			       ifname[i]);
			continue;
		}
		netdevs[nif].ifindex = dev->ifindex;

//      indev = inetdev_by_index(dev->ifindex);
		indev = in_dev_get(dev);

		if (indev) {
			for (ifap = &indev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next)
				if (!strcmp(dev->name, ifa->ifa_label))
					break;

			if (ifa) {
				netdevs[nif].ip_addr = ifa->ifa_address;
				netdevs[nif].bc_addr = ifa->ifa_broadcast;

				//printk("dev ip=%s bc=%s\n", print_ip(netdevs[nif].ip_addr), print_ip(netdevs[nif].bc_addr));

			}
			in_dev_put(indev);
		}
		nif++;
		dev_put(dev);
	}

	proc_net_create("kaodv", 0, kaodv_proc_info);

	return ret;

      cleanup_hook1:
	nf_unregister_hook(&kaodv_ops[1]);
      cleanup_hook0:
	nf_unregister_hook(&kaodv_ops[0]);
      cleanup_netlink:
	kaodv_netlink_fini();
      cleanup_queue:
	kaodv_queue_fini();
	return ret;
}
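
The cleanup ladder above (cleanup_hook1 / cleanup_hook0 / cleanup_netlink / cleanup_queue) unwinds each nf_register_hook() by hand. On kernels that provide nf_register_net_hooks(), the three registrations collapse into one call that unwinds itself on failure; a hedged sketch (assuming kaodv_ops is a true three-element array, as its indexing above suggests, and that hooking &init_net is acceptable for this module):

/* Sketch only: batch registration replacing the three-step ladder above. */
static int kaodv_register_hooks(void)
{
	return nf_register_net_hooks(&init_net, kaodv_ops,
				     ARRAY_SIZE(kaodv_ops));
}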
Example #8
static int comp_rx_data(struct mbo *mbo)
{
	const u32 zero = 0;
	struct net_dev_context *nd;
	char *buf = mbo->virt_address;
	u32 len = mbo->processed_length;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int skb_len;
	int ret = 0;

	nd = get_net_dev_hold(mbo->ifp);
	if (!nd)
		return -EIO;

	if (nd->rx.ch_id != mbo->hdm_channel_id) {
		ret = -EIO;
		goto put_nd;
	}

	dev = nd->dev;

	if (nd->is_mamac) {
		if (!pms_is_mamac(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
	} else {
		if (!PMS_IS_MEP(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MEP_HDR_LEN);
	}

	if (!skb) {
		dev->stats.rx_dropped++;
		pr_err_once("drop packet: no memory for skb\n");
		goto out;
	}

	skb->dev = dev;

	if (nd->is_mamac) {
		/* dest */
		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);

		/* src */
		skb_put_data(skb, &zero, 4);
		skb_put_data(skb, buf + 5, 2);

		/* eth type */
		skb_put_data(skb, buf + 10, 2);

		buf += MDP_HDR_LEN;
		len -= MDP_HDR_LEN;
	} else {
		buf += MEP_HDR_LEN;
		len -= MEP_HDR_LEN;
	}

	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb_len = skb->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
	} else {
		dev->stats.rx_dropped++;
	}

out:
	most_put_mbo(mbo);

put_nd:
	dev_put(nd->dev);
	return ret;
}
Example #9
static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev_ptr)
{
	struct net_device *dev;
	struct mpoa_client *mpc;
	struct lec_priv *priv;

	dev = (struct net_device *)dev_ptr;
	if (dev->name == NULL || strncmp(dev->name, "lec", 3))
		return NOTIFY_DONE; /* we are only interested in lec:s */
	
	switch (event) {
	case NETDEV_REGISTER:       /* a new lec device was allocated */
		priv = (struct lec_priv *)dev->priv;
		if (priv->lane_version < 2)
			break;
		priv->lane2_ops->associate_indicator = lane2_assoc_ind;
		mpc = find_mpc_by_itfnum(priv->itfnum);
		if (mpc == NULL) {
			dprintk("mpoa: mpoa_event_listener: allocating new mpc for %s\n",
			       dev->name);
			mpc = alloc_mpc();
			if (mpc == NULL) {
				printk("mpoa: mpoa_event_listener: no new mpc");
				break;
			}
		}
		mpc->dev_num = priv->itfnum;
		mpc->dev = dev;
		dev_hold(dev);
		dprintk("mpoa: (%s) was initialized\n", dev->name);
		break;
	case NETDEV_UNREGISTER:
		/* the lec device was deallocated */
		mpc = find_mpc_by_lec(dev);
		if (mpc == NULL)
			break;
		dprintk("mpoa: device (%s) was deallocated\n", dev->name);
		stop_mpc(mpc);
		dev_put(mpc->dev);
		mpc->dev = NULL;
		break;
	case NETDEV_UP:
		/* the dev was ifconfig'ed up */
		mpc = find_mpc_by_lec(dev);
		if (mpc == NULL)
			break;
		if (mpc->mpoad_vcc != NULL) {
			start_mpc(mpc, dev);
		}
		break;
	case NETDEV_DOWN:
		/* the dev was ifconfig'ed down */
		/* this means that the flow of packets from the
		 * upper layer stops
		 */
		mpc = find_mpc_by_lec(dev);
		if (mpc == NULL)
			break;
		if (mpc->mpoad_vcc != NULL) {
			stop_mpc(mpc);
		}
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
Example #10
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	int rc;
	int ip_ver = 0;

	bool has_udp = false;
	int i;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
			       grh->sgid_index, &sgid, &sgid_attr);
	if (rc) {
		DP_ERR(dev,
		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
		       rdma_ah_get_port_num(ah_attr),
		       grh->sgid_index);
		return rc;
	}

	if (sgid_attr.ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
		if (vlan_id < VLAN_CFI_MASK)
			has_vlan = true;

		dev_put(sgid_attr.ndev);
	}

	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
		       grh->sgid_index);
		return -ENOENT;
	}

	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;
		/* UDP length is untouched hence is zero */
	}
	return 0;
}
/*
 * Stuff received packets to associated sockets.
 * On error, returns non-zero and releases the skb.
 */
static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pkttype,
			struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct phonethdr *ph;
	struct sockaddr_pn sa;
	u16 len;

	/* check we have at least a full Phonet header */
	if (!pskb_pull(skb, sizeof(struct phonethdr)))
		goto out;

	/* check that the advertised length is correct */
	ph = pn_hdr(skb);
	len = get_unaligned_be16(&ph->pn_length);
	if (len < 2)
		goto out;
	len -= 2;
	if ((len > skb->len) || pskb_trim(skb, len))
		goto out;
	skb_reset_transport_header(skb);

	pn_skb_get_dst_sockaddr(skb, &sa);

	/* check if this is broadcasted */
	if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) {
		pn_deliver_sock_broadcast(net, skb);
		goto out;
	}

	/* check if we are the destination */
	if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
		/* Phonet packet input */
		struct sock *sk = pn_find_sock_by_sa(net, &sa);

		if (sk)
			return sk_receive_skb(sk, skb, 0);

		if (can_respond(skb)) {
			send_obj_unreachable(skb);
			send_reset_indications(skb);
		}
	} else if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto out; /* Race between address deletion and loopback */
	else {
		/* Phonet packet routing */
		struct net_device *out_dev;

		out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa));
		if (!out_dev) {
			LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n",
					pn_sockaddr_get_addr(&sa));
			goto out;
		}

		__skb_push(skb, sizeof(struct phonethdr));
		skb->dev = out_dev;
		if (out_dev == dev) {
			LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n",
					pn_sockaddr_get_addr(&sa), dev->name);
			goto out_dev;
		}
		/* Some drivers (e.g. TUN) do not allocate HW header space */
		if (skb_cow_head(skb, out_dev->hard_header_len))
			goto out_dev;

		if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL,
					skb->len) < 0)
			goto out_dev;
		dev_queue_xmit(skb);
		dev_put(out_dev);
		return NET_RX_SUCCESS;
out_dev:
		dev_put(out_dev);
	}

out:
	kfree_skb(skb);
	return NET_RX_DROP;
}
Example #12
ssize_t WIFI_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	int retval = -EIO;
	struct net_device *netdev = NULL;
	char local[12] = {0};
	int wait_cnt = 0;

	down(&wr_mtx);
	if (count <= 0) {
	    WIFI_ERR_FUNC("WIFI_write invalid param\n");
	    goto done;
	}

	if (0 == copy_from_user(local, buf, (count > sizeof(local)) ? sizeof(local) : count)) {
		local[11] = 0;
		WIFI_INFO_FUNC("WIFI_write %s\n", local);
		if (local[0] == '0') {
			if (powered == 0) {
				WIFI_INFO_FUNC("WIFI is already power off!\n");
				retval = count;
				goto done;
			} else {
				/* WIFI FUNCTION OFF */
				WIFI_func_ctrl(0);
				WIFI_INFO_FUNC("WMT turn off WIFI OK!\n");
				powered = 0;
				retval = count;
			}
		}
		else if (local[0] == '1') {
			/* WIFI FUNCTION ON */
			if (powered == 1) {
				WIFI_INFO_FUNC("WIFI is already power on!\n");
				retval = count;
				goto done;
			} else {
				WIFI_func_ctrl(1);
				WIFI_INFO_FUNC("WMT turn on WIFI success!\n");
				powered = 1;
				retval = count;
			}
		}
		else if (local[0] == 'S' || local[0] == 'P' || local[0] == 'A') {
			if (powered == 0) {
				WIFI_func_ctrl(1);
				WIFI_INFO_FUNC("WMT turn on WIFI success!\n");
				powered = 1;
			}

			/* Polling NET DEV if exist */
			netdev = dev_get_by_name(&init_net, WLAN_IFACE_NAME);
			while (netdev == NULL && wait_cnt < 10) {
				WIFI_ERR_FUNC("Fail to get wlan0 net device, sleep %d ms(%d)\n", WLAN_QUERYDEV_TIME,wait_cnt);
				msleep(WLAN_QUERYDEV_TIME);
				wait_cnt++;
				netdev = dev_get_by_name(&init_net, WLAN_IFACE_NAME);
			}
			if (wait_cnt >= 10) {
				WIFI_ERR_FUNC("Get wlan0 net device timeout\n");
				goto done;
			}
			WIFI_INFO_FUNC("wlan0 net device created\n");
			dev_put(netdev);
			netdev = NULL;
		}
	}

done:
    if (netdev != NULL){
        dev_put(netdev);
    }
    up(&wr_mtx);
    return (retval);
}
static int dn_def_dev_handler(struct ctl_table *table, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	size_t len;
	struct net_device *dev;
	char devname[17];

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		if (*lenp > 16)
			return -E2BIG;

		if (copy_from_user(devname, buffer, *lenp))
			return -EFAULT;

		devname[*lenp] = 0;
		strip_it(devname);

		dev = dev_get_by_name(&init_net, devname);
		if (dev == NULL)
			return -ENODEV;

		if (dev->dn_ptr == NULL) {
			dev_put(dev);
			return -ENODEV;
		}

		if (dn_dev_set_default(dev, 1)) {
			dev_put(dev);
			return -ENODEV;
		}
		*ppos += *lenp;

		return 0;
	}

	dev = dn_dev_get_default();
	if (dev == NULL) {
		*lenp = 0;
		return 0;
	}

	strcpy(devname, dev->name);
	dev_put(dev);
	len = strlen(devname);
	devname[len++] = '\n';

	if (len > *lenp) len = *lenp;

	if (copy_to_user(buffer, devname, len))
		return -EFAULT;

	*lenp = len;
	*ppos += len;

	return 0;
}
Example #14
static int br_ioctl_device(struct net_bridge *br,
			   unsigned int cmd,
			   unsigned long arg0,
			   unsigned long arg1,
			   unsigned long arg2)
{
	if (br == NULL)
		return -EINVAL;

	switch (cmd)
	{
	case BRCTL_ADD_IF:
	case BRCTL_DEL_IF:
	{
		struct net_device *dev;
		int ret;

		dev = dev_get_by_index(arg0);
		if (dev == NULL)
			return -EINVAL;

		if (cmd == BRCTL_ADD_IF)
			ret = br_add_if(br, dev);
		else
			ret = br_del_if(br, dev);

		dev_put(dev);
		return ret;
	}

	case BRCTL_GET_BRIDGE_INFO:
	{
		struct __bridge_info b;

		memset(&b, 0, sizeof(struct __bridge_info));
		memcpy(&b.designated_root, &br->designated_root, 8);
		memcpy(&b.bridge_id, &br->bridge_id, 8);
		b.root_path_cost = br->root_path_cost;
		b.max_age = br->max_age;
		b.hello_time = br->hello_time;
		b.forward_delay = br->forward_delay;
		b.bridge_max_age = br->bridge_max_age;
		b.bridge_hello_time = br->bridge_hello_time;
		b.bridge_forward_delay = br->bridge_forward_delay;
		b.topology_change = br->topology_change;
		b.topology_change_detected = br->topology_change_detected;
		b.root_port = br->root_port;
		b.stp_enabled = br->stp_enabled;
		b.ageing_time = br->ageing_time;
		b.gc_interval = br->gc_interval;
		b.hello_timer_value = br_timer_get_residue(&br->hello_timer);
		b.tcn_timer_value = br_timer_get_residue(&br->tcn_timer);
		b.topology_change_timer_value = br_timer_get_residue(&br->topology_change_timer);
		b.gc_timer_value = br_timer_get_residue(&br->gc_timer);

		if (copy_to_user((void *)arg0, &b, sizeof(b)))
			return -EFAULT;

		return 0;
	}

	case BRCTL_GET_PORT_LIST:
	{
		int i;
		int indices[256];

		for (i=0;i<256;i++)
			indices[i] = 0;

		br_get_port_ifindices(br, indices);
		if (copy_to_user((void *)arg0, indices, 256*sizeof(int)))
			return -EFAULT;

		return 0;
	}

	case BRCTL_SET_BRIDGE_FORWARD_DELAY:
		br->bridge_forward_delay = arg0;
		if (br_is_root_bridge(br))
			br->forward_delay = arg0;
		return 0;

	case BRCTL_SET_BRIDGE_HELLO_TIME:
		br->bridge_hello_time = arg0;
		if (br_is_root_bridge(br))
			br->hello_time = arg0;
		return 0;

	case BRCTL_SET_BRIDGE_MAX_AGE:
		br->bridge_max_age = arg0;
		if (br_is_root_bridge(br))
			br->max_age = arg0;
		return 0;

	case BRCTL_SET_AGEING_TIME:
		br->ageing_time = arg0;
		return 0;

	case BRCTL_SET_GC_INTERVAL:
		br->gc_interval = arg0;
		return 0;

	case BRCTL_GET_PORT_INFO:
	{
		struct __port_info p;
		struct net_bridge_port *pt;

		if ((pt = br_get_port(br, arg1)) == NULL)
			return -EINVAL;

		memset(&p, 0, sizeof(struct __port_info));
		memcpy(&p.designated_root, &pt->designated_root, 8);
		memcpy(&p.designated_bridge, &pt->designated_bridge, 8);
		p.port_id = pt->port_id;
		p.designated_port = pt->designated_port;
		p.path_cost = pt->path_cost;
		p.designated_cost = pt->designated_cost;
		p.state = pt->state;
		p.top_change_ack = pt->topology_change_ack;
		p.config_pending = pt->config_pending;
		p.message_age_timer_value = br_timer_get_residue(&pt->message_age_timer);
		p.forward_delay_timer_value = br_timer_get_residue(&pt->forward_delay_timer);
		p.hold_timer_value = br_timer_get_residue(&pt->hold_timer);

		if (copy_to_user((void *)arg0, &p, sizeof(p)))
			return -EFAULT;

		return 0;
	}

	case BRCTL_SET_BRIDGE_STP_STATE:
		br->stp_enabled = arg0?1:0;
		return 0;

	case BRCTL_SET_BRIDGE_PRIORITY:
		br_stp_set_bridge_priority(br, arg0);
		return 0;

	case BRCTL_SET_PORT_PRIORITY:
	{
		struct net_bridge_port *p;

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		br_stp_set_port_priority(p, arg1);
		return 0;
	}

	case BRCTL_SET_PATH_COST:
	{
		struct net_bridge_port *p;

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		br_stp_set_path_cost(p, arg1);
		return 0;
	}

	case BRCTL_GET_FDB_ENTRIES:
#ifdef CONFIG_RTK_GUEST_ZONE
		return br_fdb_get_entries(br, (void *)arg0, arg1, arg2, 0);
#else		
		return br_fdb_get_entries(br, (void *)arg0, arg1, arg2);
#endif

#ifdef MULTICAST_FILTER
	case 101:
		printk(KERN_INFO "%s: clear port list of multicast filter\n", br->dev.name);
		br->fltr_portlist_num = 0;
		return 0;

	case 102:
	{
		int i;

		if (br->fltr_portlist_num == MLCST_FLTR_ENTRY) {
			printk(KERN_INFO "%s: set port num of multicast filter, entries full!\n", br->dev.name);
			return 0;
		}
		for (i=0; i<br->fltr_portlist_num; i++)
			if (br->fltr_portlist[i] == (unsigned short)arg0)
				return 0;
		printk(KERN_INFO "%s: set port num [%d] of multicast filter\n", br->dev.name, (unsigned short)arg0);
		br->fltr_portlist[br->fltr_portlist_num] = (unsigned short)arg0;
		br->fltr_portlist_num++;
		return 0;
	}
#endif

#ifdef MULTICAST_BWCTRL
	case 103:
	{
		struct net_bridge_port *p;

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		if (arg1 == 0) {
			p->bandwidth = 0;
			printk(KERN_INFO "%s: port %i(%s) multicast bandwidth all\n",
			       p->br->dev.name, p->port_no, p->dev->name);
		}
		else {
			p->bandwidth = arg1 * 1000 / 8;
			printk(KERN_INFO "%s: port %i(%s) multicast bandwidth %dkbps\n",
			       p->br->dev.name, p->port_no, p->dev->name, (unsigned int)arg1);
		}
		return 0;
	}
#endif

#ifdef RTL_BRIDGE_MAC_CLONE
	case 104:	// MAC Clone enable/disable
	{
		struct net_bridge_port *p;
		unsigned char nullmac[] = {0, 0, 0, 0, 0, 0};

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;

		if ((p->macCloneTargetPort = br_get_port(br, arg1)) == NULL)
			return -EINVAL;

		p->enable_mac_clone = 1;
		p->mac_clone_completed = 0;

		if (clone_pair.port != p->macCloneTargetPort)
		{
			TRACE("clone_pair.port [%x] != p->macCloneTargetPort [%x], don't clone\n", (unsigned int)clone_pair.port, (unsigned int)p->macCloneTargetPort);
			clone_pair.port = p->macCloneTargetPort;
			TRACE("clone_pair.port = %x\n", (unsigned int)clone_pair.port);
			memset(clone_pair.mac.addr, 0, ETH_ALEN);
		}
		else
		{
			if(!memcmp(clone_pair.mac.addr, nullmac, ETH_ALEN))
			{
				TRACE("clone_pair.mac.addr == nullmac, don't clone\n");
			}
			else
			{
				TRACE("Clone MAC from previous one\n");
				br_mac_clone(p->macCloneTargetPort, clone_pair.mac.addr);
			}
		}

		TRACE("device %s, Enable MAC Clone to device %s\n", p->dev->name, p->macCloneTargetPort->dev->name);
		return 0;
	}
#endif

#ifdef CONFIG_RTK_GUEST_ZONE
	case 105:	// set zone
	{
		struct net_bridge_port *p;
		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		p->is_guest_zone = arg1;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set device=%s is_guest_zone=%d\n", p->dev->name, p->is_guest_zone);
#endif
		return 0;
	}

	case 106:	// set zone isolation
		br->is_zone_isolated = arg0;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set zone isolation=%d\n",	br->is_zone_isolated);		
#endif
		return 0;
		
	case 107:	// set guest isolation
		br->is_guest_isolated = arg0;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set guest isolation=%d\n",	br->is_guest_isolated);		
#endif		
		return 0;

	case 108:	// set lock mac list
	{
		unsigned char mac[6];
		int i;
		if (copy_from_user(mac, (unsigned long*)arg0, 6))
			return -EFAULT;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set lock client list=%02x:%02x:%02x:%02x:%02x:%02x\n", 
							mac[0],mac[1],mac[2],mac[3],mac[4],mac[5]);	
#endif
		if (!memcmp(mac, "\x0\x0\x0\x0\x0\x0", 6)) {  // reset list
#ifdef DEBUG_GUEST_ZONE		
			panic_printk("reset lock list!\n");
#endif
			br->lock_client_num = 0;
			return 0;		
		}
		for (i=0; i<br->lock_client_num; i++) {
			if (!memcmp(mac, br->lock_client_list[i], 6)) {
#ifdef DEBUG_GUEST_ZONE				
				panic_printk("duplicated lock entry!\n");
#endif
				return 0;
			}			
		}
		if (br->lock_client_num >= MAX_LOCK_CLIENT) {
#ifdef DEBUG_GUEST_ZONE			
			panic_printk("Add failed, lock list table full!\n");
#endif
			return 0;
		}
		memcpy(br->lock_client_list[br->lock_client_num], mac, 6);
		br->lock_client_num++;
		return 0;
	}		
	case 109:	// show guest info
	{
		int i;
		panic_printk("\n");
		panic_printk("  zone isolation: %d\n", br->is_zone_isolated);
		panic_printk("  guest isolation: %d\n", br->is_guest_isolated);
		i = 1;
		while (1) {
			struct net_bridge_port *p;
			if ((p = br_get_port(br, i++)) == NULL)
				break;
			panic_printk("  %s: %s\n", p->dev->name, (p->is_guest_zone ? "guest" : "host"));			
		}		
		panic_printk("  locked client no: %d\n", br->lock_client_num);
		for (i=0; i< br->lock_client_num; i++) {
			unsigned char *mac;
			mac = br->lock_client_list[i];
			panic_printk("    mac=%02x:%02x:%02x:%02x:%02x:%02x\n", 
							mac[0],mac[1],mac[2],mac[3],mac[4],mac[5]);	
		}			
		panic_printk("\n");		
		return 0;
	}	

	case 110:
		return br_fdb_get_entries(br, (void *)arg0, arg1, arg2, 1);	
#endif	// CONFIG_RTK_GUEST_ZONE
	}

	return -EOPNOTSUPP;
}
Example #15
static unsigned int 
route6_oif(const struct ip6t_route_target_info *route_info,
	   struct sk_buff *skb) 
{
	unsigned int ifindex = 0;
	struct net_device *dev_out = NULL;

	/* The user set the interface name to use.
	 * Getting the current interface index.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	if ((dev_out = dev_get_by_name(&init_net, route_info->oif))) {
#else
	if ((dev_out = dev_get_by_name(route_info->oif))) {
#endif
		ifindex = dev_out->ifindex;
	} else {
		/* Unknown interface name : packet dropped */
		if (net_ratelimit()) 
			DEBUGP("ip6t_ROUTE: oif interface %s not found\n", route_info->oif);

		if (route_info->flags & IP6T_ROUTE_CONTINUE)
			return IP6T_CONTINUE;
		else
			return NF_DROP;
	}

	/* Trying the standard way of routing packets */
	if (route6(skb, ifindex, route_info)) {
		dev_put(dev_out);
		if (route_info->flags & IP6T_ROUTE_CONTINUE)
			return IP6T_CONTINUE;
		
		ip_direct_send(skb);
		return NF_STOLEN;
	} else 
		return NF_DROP;
}


static unsigned int 
route6_gw(const struct ip6t_route_target_info *route_info,
	  struct sk_buff *skb) 
{
	if (route6(skb, 0, route_info)) {
		if (route_info->flags & IP6T_ROUTE_CONTINUE)
			return IP6T_CONTINUE;

		ip_direct_send(skb);
		return NF_STOLEN;
	} else
		return NF_DROP;
}

static unsigned int
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
target(struct sk_buff **pskb,
       unsigned int hooknum,
       const struct net_device *in,
       const struct net_device *out,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
target(struct sk_buff *skb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
target(struct sk_buff *skb,
       const struct xt_target_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) */
target(struct sk_buff *skb,
       const struct xt_action_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	const struct ip6t_route_target_info *route_info = targinfo;
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
	const struct ip6t_route_target_info *route_info = par->targinfo;
	unsigned int hooknum = par->hooknum;
#else
	const struct ip6t_route_target_info *route_info = par->targinfo;
	unsigned int hooknum = par->hooknum;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	struct sk_buff *skb = *pskb;
#endif
	struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
	unsigned int res;

	if (route_info->flags & IP6T_ROUTE_CONTINUE)
		goto do_it;

	/* If we are at PREROUTING or INPUT hook
	 * the TTL isn't decreased by the IP stack
	 */
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN) {

		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		if (ipv6h->hop_limit <= 1) {
			/* Force OUTPUT device used as source address */
			skb->dev = skb_dst(skb)->dev;

			icmpv6_send(skb, ICMPV6_TIME_EXCEED, 
				    ICMPV6_EXC_HOPLIMIT, 0);

			return NF_DROP;
		}

		ipv6h->hop_limit--;
	}

	if ((route_info->flags & IP6T_ROUTE_TEE)) {
		/*
		 * Copy the skb, and route the copy. Will later return
		 * IP6T_CONTINUE for the original skb, which should continue
		 * on its way as if nothing happened. The copy should be
		 * independently delivered to the ROUTE --gw.
		 */
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit()) 
				DEBUGP(KERN_DEBUG "ip6t_ROUTE: copy failed!\n");
			return IP6T_CONTINUE;
		}
	}

do_it:
	if (route_info->oif[0]) {
		res = route6_oif(route_info, skb);
	} else if (!ipv6_addr_any(gw)) {
		res = route6_gw(route_info, skb);
	} else {
		if (net_ratelimit()) 
			DEBUGP(KERN_DEBUG "ip6t_ROUTE: no parameter !\n");
		res = IP6T_CONTINUE;
	}

	if ((route_info->flags & IP6T_ROUTE_TEE))
		res = IP6T_CONTINUE;

	return res;
}


#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
static int
checkentry(const char *tablename,
	   const struct ip6t_entry *e,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static int
checkentry(const char *tablename,
	   const void *e,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static int
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
static int
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static bool
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int hook_mask)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static bool
checkentry(const struct xt_tgchk_param *par)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	const char *tablename = par->table;
#endif

	if (strcmp(tablename, "mangle") != 0) {
		printk("ip6t_ROUTE: can only be called from \"mangle\" table.\n");
		return 0;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
	if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_route_target_info))) {
		printk(KERN_WARNING "ip6t_ROUTE: targinfosize %u != %Zu\n",
		       targinfosize,
		       IP6T_ALIGN(sizeof(struct ip6t_route_target_info)));
		return 0;
	}
#endif

	return 1;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
static struct xt_target ip6t_route_reg = {
#else
static struct ip6t_target ip6t_route_reg = {
#endif
	.name		= "ROUTE",
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	.family		= AF_INET6,
#endif
	.target		= target,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
	.targetsize	= sizeof(struct ip6t_route_target_info),
#endif
	.checkentry	= checkentry,
	.me		= THIS_MODULE
};


static int __init init(void)
{
	printk(KERN_DEBUG "registering ipv6 ROUTE target\n");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	if (xt_register_target(&ip6t_route_reg))
#else
	if (ip6t_register_target(&ip6t_route_reg))
#endif
		return -EINVAL;

	return 0;
}


static void __exit fini(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	xt_unregister_target(&ip6t_route_reg);
#else
	ip6t_unregister_target(&ip6t_route_reg);
#endif
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
Example #16
static ssize_t schar_write_2(struct file *file, const char *buf, size_t count,
			   loff_t *offset)
{
  /* structured after af_packet.c by S. Durkin */
  
  int len;
  int err;
  
  static struct net_device *dev;
  static struct sk_buff *skb;
  static unsigned short proto=0;
  // printk(KERN_INFO " LSD: write length %d \n",count);

  // sbuf=kmalloc(9000,GFP_KERNEL);
  len=count;
  dev=dev_get_by_name("eth2");
  err=-ENODEV;
  if (dev == NULL)
   goto out_unlock;
            
/*
 *      You may not queue a frame bigger than the mtu. This is the lowest level
 *      raw protocol and you must do your own fragmentation at this level.
*/
                
  err = -EMSGSIZE;
  if(len>dev->mtu+dev->hard_header_len)
  goto out_unlock;
     
  err = -ENOBUFS;
  //  skb = sock_wmalloc(sk, len+dev->hard_header_len+15, 0, GFP_KERNEL);
  skb=dev_alloc_skb(len+dev->hard_header_len+15);   
/*
 *      If the write buffer is full, then tough. At this level the user gets to
 *      deal with the problem - do your own algorithmic backoffs. That's far
 *      more flexible.
*/
              
  if (skb == NULL) 
  goto out_unlock;
     
/*
*      Fill it in 
*/
              
/* FIXME: Save some space for broken drivers that write a
* hard header at transmission time by themselves. PPP is the
* notable one here. This should really be fixed at the driver level.
*/
   skb_reserve(skb,(dev->hard_header_len+15)&~15);
   skb->nh.raw = skb->data;
   proto=htons(ETH_P_ALL);
   /*
    *	if (dev->hard_header) {
    *		int res;
    *		err = -EINVAL;
    *		addr = NULL;
    *		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
    *		skb->tail = skb->data;
    *		skb->len = 0;
    *	}
    */

   skb->tail = skb->data;
   skb->len = 0;

   /* Try to align data part correctly */
   /*
    *	if (dev->hard_header) {
    *		skb->data -= dev->hard_header_len;
    *		skb->tail -= dev->hard_header_len;
    *	}
    */
  
     //  printk(KERN_INFO " header length %d  \n",dev->hard_header_len);
/* Returns -EFAULT on error */
   //  err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	
    err = copy_from_user(skb_put(skb,len),buf, count);
   // err = memcpy_fromio(skb_put(skb,len),sbuf,len);
   // printk(KERN_INFO " lsd: len count %d %d %02x  \n",len,count,*(skb->data+98)&0xff);
   skb->protocol = htons(ETH_P_ALL);
   skb->dev = dev;
   skb->priority = 0;
   // skb->pkt_type=PACKET_MR_PROMISC;
   skb->ip_summed=CHECKSUM_UNNECESSARY;
   if (err)
   goto out_free;
     
   err = -ENETDOWN;
   if (!(dev->flags & IFF_UP))
   goto out_free;
     
/*
*      Now send it
*/
     
   dev_queue_xmit(skb);
   dev_put(dev); 
   // printk(KERN_INFO " lsd: len count %d %d %02x  \n",len,count,*(skb->data+98)&0xff);
   // for(i=120;i<160;i++)printk(KERN_INFO " %02x",*(skb->data+i)&0xff);
   // printk(KERN_INFO "\n");
   // kfree(sbuf);
  
   proc_tpackets_2=proc_tpackets_2+1;
   proc_tbytesL_2=proc_tbytesL_2+len;
   if(proc_tbytesL_2>1000000000){
      proc_tbytesL_2=proc_tbytesL_2-1000000000;
      proc_tbytesH_2=proc_tbytesH_2+1;
      //      printk(KERN_INFO "tbytesH %d \n",proc_tbytesH_2);
      //      printk(KERN_INFO "tbytesH %d \n",proc_tbytesH_2);
      //      printk(KERN_INFO "tbytesH %d \n",proc_tbytesH_2);
      //      printk(KERN_INFO "tbytesH %d \n",proc_tbytesH_2);
   }

   return count;
     
   out_free:
     kfree_skb(skb);
   out_unlock:
     if (dev)dev_put(dev);
      // kfree(sbuf);           
  return -err;
}
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk=sock->sk;
	struct sk_filter *filter;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;
	
	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif
	
  	if(optlen<sizeof(int))
  		return(-EINVAL);
  	
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	
  	valbool = val?1:0;

	lock_sock(sk);

  	switch(optname) 
  	{
		case SO_DEBUG:	
			if(val && !capable(CAP_NET_ADMIN))
			{
				ret = -EACCES;
			}
			else if (valbool)
				sock_set_flag(sk, SOCK_DBG);
			else
				sock_reset_flag(sk, SOCK_DBG);
			break;
		case SO_REUSEADDR:
			sk->sk_reuse = valbool;
			break;
		case SO_TYPE:
		case SO_ERROR:
			ret = -ENOPROTOOPT;
		  	break;
		case SO_DONTROUTE:
			if (valbool)
				sock_set_flag(sk, SOCK_LOCALROUTE);
			else
				sock_reset_flag(sk, SOCK_LOCALROUTE);
			break;
		case SO_BROADCAST:
			sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
			break;
		case SO_SNDBUF:
			/* Don't error on this; BSD doesn't, and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			   
			if (val > sysctl_wmem_max)
				val = sysctl_wmem_max;
set_sndbuf:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			if ((val * 2) < SOCK_MIN_SNDBUF)
				sk->sk_sndbuf = SOCK_MIN_SNDBUF;
			else
				sk->sk_sndbuf = val * 2;

			/*
			 *	Wake up sending tasks if we
			 *	upped the value.
			 */
			sk->sk_write_space(sk);
			break;

		case SO_SNDBUFFORCE:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			goto set_sndbuf;

		case SO_RCVBUF:
			/* Don't error on this; BSD doesn't, and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			  
			if (val > sysctl_rmem_max)
				val = sysctl_rmem_max;
set_rcvbuf:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			/* FIXME: is this lower bound the right one? */
			if ((val * 2) < SOCK_MIN_RCVBUF)
				sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
			else
				sk->sk_rcvbuf = val * 2;
			break;

		case SO_RCVBUFFORCE:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			goto set_rcvbuf;

		case SO_KEEPALIVE:
#ifdef CONFIG_INET
			if (sk->sk_protocol == IPPROTO_TCP)
				tcp_set_keepalive(sk, valbool);
#endif
			sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
			break;

	 	case SO_OOBINLINE:
			sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
			break;

	 	case SO_NO_CHECK:
			sk->sk_no_check = valbool;
			break;

		case SO_PRIORITY:
			if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 
				sk->sk_priority = val;
			else
				ret = -EPERM;
			break;

		case SO_LINGER:
			if(optlen<sizeof(ling)) {
				ret = -EINVAL;	/* 1003.1g */
				break;
			}
			if (copy_from_user(&ling,optval,sizeof(ling))) {
				ret = -EFAULT;
				break;
			}
			if (!ling.l_onoff)
				sock_reset_flag(sk, SOCK_LINGER);
			else {
#if (BITS_PER_LONG == 32)
				if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
					sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
				else
#endif
					sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
				sock_set_flag(sk, SOCK_LINGER);
			}
			break;

		case SO_BSDCOMPAT:
			sock_warn_obsolete_bsdism("setsockopt");
			break;

		case SO_PASSCRED:
			if (valbool)
				set_bit(SOCK_PASSCRED, &sock->flags);
			else
				clear_bit(SOCK_PASSCRED, &sock->flags);
			break;

		case SO_TIMESTAMP:
			if (valbool)  {
				sock_set_flag(sk, SOCK_RCVTSTAMP);
				sock_enable_timestamp(sk);
			} else
				sock_reset_flag(sk, SOCK_RCVTSTAMP);
			break;

		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->sk_rcvlowat = val ? : 1;
			break;

		case SO_RCVTIMEO:
			ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
			break;

		case SO_SNDTIMEO:
			ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
			break;

#ifdef CONFIG_NETDEVICES
		case SO_BINDTODEVICE:
		{
			char devname[IFNAMSIZ]; 

			/* Sorry... */ 
			if (!capable(CAP_NET_RAW)) {
				ret = -EPERM;
				break;
			}

			/* Bind this socket to a particular device like "eth0",
			 * as specified in the passed interface name. If the
			 * name is "" or the option length is zero the socket 
			 * is not bound. 
			 */ 

			if (!valbool) {
				sk->sk_bound_dev_if = 0;
			} else {
				if (optlen > IFNAMSIZ - 1)
					optlen = IFNAMSIZ - 1;
				memset(devname, 0, sizeof(devname));
				if (copy_from_user(devname, optval, optlen)) {
					ret = -EFAULT;
					break;
				}

				/* Remove any cached route for this socket. */
				sk_dst_reset(sk);

				if (devname[0] == '\0') {
					sk->sk_bound_dev_if = 0;
				} else {
					struct net_device *dev = dev_get_by_name(devname);
					if (!dev) {
						ret = -ENODEV;
						break;
					}
					sk->sk_bound_dev_if = dev->ifindex;
					dev_put(dev);
				}
			}
			break;
		}
#endif


		case SO_ATTACH_FILTER:
			ret = -EINVAL;
			if (optlen == sizeof(struct sock_fprog)) {
				struct sock_fprog fprog;

				ret = -EFAULT;
				if (copy_from_user(&fprog, optval, sizeof(fprog)))
					break;

				ret = sk_attach_filter(&fprog, sk);
			}
			break;

		case SO_DETACH_FILTER:
			spin_lock_bh(&sk->sk_lock.slock);
			filter = sk->sk_filter;
			if (filter) {
				sk->sk_filter = NULL;
				spin_unlock_bh(&sk->sk_lock.slock);
				sk_filter_release(sk, filter);
				break;
			}
			spin_unlock_bh(&sk->sk_lock.slock);
			ret = -ENONET;
			break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
		default:
		  	ret = -ENOPROTOOPT;
			break;
	}
	release_sock(sk);
	return ret;
}
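The SO_SNDBUF/SO_RCVBUF handling above (clamp against the sysctl limit, store double the requested value) is visible from user space: getsockopt() reports the doubled value. A minimal user-space sketch, not taken from the kernel sources above and with error handling trimmed:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int requested = 64 * 1024;	/* what the application asks for */
	int effective = 0;
	socklen_t len = sizeof(effective);

	if (fd < 0)
		return 1;

	/* The kernel clamps against net.core.wmem_max and stores roughly 2 * requested. */
	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &requested, sizeof(requested)) < 0)
		return 1;

	if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &effective, &len) < 0)
		return 1;

	/* Typically prints ~131072 for a 65536-byte request, subject to wmem_max. */
	printf("requested %d, effective %d\n", requested, effective);
	return 0;
}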
Example #18
/**
 * dump_common_audit_data - helper to dump common audit data
 * @a : common audit data
 *
 */
static void dump_common_audit_data(struct audit_buffer *ab,
				   struct common_audit_data *a)
{
	char comm[sizeof(current->comm)];

	/*
	 * To keep stack sizes in check, force programmers to notice if they
	 * start making this union too large!  See struct lsm_network_audit
	 * as an example of how to deal with large data.
	 */
	BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);

	audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
	audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));

	switch (a->type) {
	case LSM_AUDIT_DATA_NONE:
		return;
	case LSM_AUDIT_DATA_IPC:
		audit_log_format(ab, " key=%d ", a->u.ipc_id);
		break;
	case LSM_AUDIT_DATA_CAP:
		audit_log_format(ab, " capability=%d ", a->u.cap);
		break;
	case LSM_AUDIT_DATA_PATH: {
		struct inode *inode;

		audit_log_d_path(ab, " path=", &a->u.path);

		inode = d_backing_inode(a->u.path.dentry);
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_FILE: {
		struct inode *inode;

		audit_log_d_path(ab, " path=", &a->u.file->f_path);

		inode = file_inode(a->u.file);
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_IOCTL_OP: {
		struct inode *inode;

		audit_log_d_path(ab, " path=", &a->u.op->path);

		inode = a->u.op->path.dentry->d_inode;
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}

		audit_log_format(ab, " ioctlcmd=0x%hx", a->u.op->cmd);
		break;
	}
	case LSM_AUDIT_DATA_DENTRY: {
		struct inode *inode;

		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, a->u.dentry->d_name.name);

		inode = d_backing_inode(a->u.dentry);
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_INODE: {
		struct dentry *dentry;
		struct inode *inode;

		inode = a->u.inode;
		dentry = d_find_alias(inode);
		if (dentry) {
			audit_log_format(ab, " name=");
			audit_log_untrustedstring(ab,
					 dentry->d_name.name);
			dput(dentry);
		}
		audit_log_format(ab, " dev=");
		audit_log_untrustedstring(ab, inode->i_sb->s_id);
		audit_log_format(ab, " ino=%lu", inode->i_ino);
		break;
	}
	case LSM_AUDIT_DATA_TASK: {
		struct task_struct *tsk = a->u.tsk;
		if (tsk) {
			pid_t pid = task_tgid_nr(tsk);
			if (pid) {
				char comm[sizeof(tsk->comm)];
				audit_log_format(ab, " opid=%d ocomm=", pid);
				audit_log_untrustedstring(ab,
				    memcpy(comm, tsk->comm, sizeof(comm)));
			}
		}
		break;
	}
	case LSM_AUDIT_DATA_NET:
		if (a->u.net->sk) {
			struct sock *sk = a->u.net->sk;
			struct unix_sock *u;
			struct unix_address *addr;
			int len = 0;
			char *p = NULL;

			switch (sk->sk_family) {
			case AF_INET: {
				struct inet_sock *inet = inet_sk(sk);

				print_ipv4_addr(ab, inet->inet_rcv_saddr,
						inet->inet_sport,
						"laddr", "lport");
				print_ipv4_addr(ab, inet->inet_daddr,
						inet->inet_dport,
						"faddr", "fport");
				break;
			}
#if IS_ENABLED(CONFIG_IPV6)
			case AF_INET6: {
				struct inet_sock *inet = inet_sk(sk);

				print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
						inet->inet_sport,
						"laddr", "lport");
				print_ipv6_addr(ab, &sk->sk_v6_daddr,
						inet->inet_dport,
						"faddr", "fport");
				break;
			}
#endif
			case AF_UNIX:
				u = unix_sk(sk);
				addr = smp_load_acquire(&u->addr);
				if (!addr)
					break;
				if (u->path.dentry) {
					audit_log_d_path(ab, " path=", &u->path);
					break;
				}
				len = addr->len-sizeof(short);
				p = &addr->name->sun_path[0];
				audit_log_format(ab, " path=");
				if (*p)
					audit_log_untrustedstring(ab, p);
				else
					audit_log_n_hex(ab, p, len);
				break;
			}
		}

		switch (a->u.net->family) {
		case AF_INET:
			print_ipv4_addr(ab, a->u.net->v4info.saddr,
					a->u.net->sport,
					"saddr", "src");
			print_ipv4_addr(ab, a->u.net->v4info.daddr,
					a->u.net->dport,
					"daddr", "dest");
			break;
		case AF_INET6:
			print_ipv6_addr(ab, &a->u.net->v6info.saddr,
					a->u.net->sport,
					"saddr", "src");
			print_ipv6_addr(ab, &a->u.net->v6info.daddr,
					a->u.net->dport,
					"daddr", "dest");
			break;
		}
		if (a->u.net->netif > 0) {
			struct net_device *dev;

			/* NOTE: we always use init's namespace */
			dev = dev_get_by_index(&init_net, a->u.net->netif);
			if (dev) {
				audit_log_format(ab, " netif=%s", dev->name);
				dev_put(dev);
			}
		}
		break;
#ifdef CONFIG_KEYS
	case LSM_AUDIT_DATA_KEY:
		audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
		if (a->u.key_struct.key_desc) {
			audit_log_format(ab, " key_desc=");
			audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
		}
		break;
#endif
	case LSM_AUDIT_DATA_KMOD:
		audit_log_format(ab, " kmod=");
		audit_log_untrustedstring(ab, a->u.kmod_name);
		break;
	case LSM_AUDIT_DATA_IBPKEY: {
		struct in6_addr sbn_pfx;

		memset(&sbn_pfx.s6_addr, 0,
		       sizeof(sbn_pfx.s6_addr));
		memcpy(&sbn_pfx.s6_addr, &a->u.ibpkey->subnet_prefix,
		       sizeof(a->u.ibpkey->subnet_prefix));
		audit_log_format(ab, " pkey=0x%x subnet_prefix=%pI6c",
				 a->u.ibpkey->pkey, &sbn_pfx);
		break;
	}
	case LSM_AUDIT_DATA_IBENDPORT:
		audit_log_format(ab, " device=%s port_num=%u",
				 a->u.ibendport->dev_name,
				 a->u.ibendport->port);
		break;
	} /* switch (a->type) */
}
Example #19
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		int if_index = 0;
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;
		struct net_device *idev;
		struct net_device *resolved_dev;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
						   ah_attr->dmac,
						   wc->wc_flags & IB_WC_WITH_VLAN ?
						   NULL : &vlan_id,
						   &if_index, &hoplimit);
		if (ret) {
			dev_put(idev);
			return ret;
		}

		resolved_dev = dev_get_by_index(&init_net, if_index);
		if (!resolved_dev) {
			dev_put(idev);
			return -ENODEV;
		}
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		rcu_read_lock();
		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
								   resolved_dev))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret)
			return ret;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
Example #20
static int tcf_mirred_init(struct rtattr *rta, struct rtattr *est,
			   struct tc_action *a, int ovr, int bind)
{
	struct rtattr *tb[TCA_MIRRED_MAX];
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct tcf_common *pc;
	struct net_device *dev = NULL;
	int ret = 0;
	int ok_push = 0;

	if (rta == NULL || rtattr_parse_nested(tb, TCA_MIRRED_MAX, rta) < 0)
		return -EINVAL;

	if (tb[TCA_MIRRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_MIRRED_PARMS-1]) < sizeof(*parm))
		return -EINVAL;
	parm = RTA_DATA(tb[TCA_MIRRED_PARMS-1]);

	if (parm->ifindex) {
		dev = __dev_get_by_index(parm->ifindex);
		if (dev == NULL)
			return -ENODEV;
		switch (dev->type) {
		case ARPHRD_TUNNEL:
		case ARPHRD_TUNNEL6:
		case ARPHRD_SIT:
		case ARPHRD_IPGRE:
		case ARPHRD_VOID:
		case ARPHRD_NONE:
			ok_push = 0;
			break;
		default:
			ok_push = 1;
			break;
		}
	}

	pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
	if (!pc) {
		if (!parm->ifindex)
			return -EINVAL;
		pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
				     &mirred_idx_gen, &mirred_hash_info);
		if (unlikely(!pc))
			return -ENOMEM;
		ret = ACT_P_CREATED;
	} else {
		if (!ovr) {
			tcf_mirred_release(to_mirred(pc), bind);
			return -EEXIST;
		}
	}
	m = to_mirred(pc);

	spin_lock_bh(&m->tcf_lock);
	m->tcf_action = parm->action;
	m->tcfm_eaction = parm->eaction;
	if (parm->ifindex) {
		m->tcfm_ifindex = parm->ifindex;
		if (ret != ACT_P_CREATED)
			dev_put(m->tcfm_dev);
		m->tcfm_dev = dev;
		dev_hold(dev);
		m->tcfm_ok_push = ok_push;
	}
	spin_unlock_bh(&m->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &mirred_hash_info);

	return ret;
}
Example #21
static int mif6_add(struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &vif6_table[vifi];
	struct net_device *dev;

	/* Is vif busy ? */
	if (MIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		break;
#endif
	case 0:
		dev = dev_get_by_index(vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		dev_put(dev);
		break;
	default:
		return -EINVAL;
	}

	dev_set_allmulti(dev, 1);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	dev_hold(dev);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi + 1 > maxvif)
		maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
Example #22
/**
 * dump_common_audit_data - helper to dump common audit data
 * @a : common audit data
 *
 */
static void dump_common_audit_data(struct audit_buffer *ab,
				   struct common_audit_data *a)
{
	struct task_struct *tsk = current;

	if (a->tsk)
		tsk = a->tsk;
	if (tsk && tsk->pid) {
		audit_log_format(ab, " pid=%d comm=", tsk->pid);
		audit_log_untrustedstring(ab, tsk->comm);
	}

	switch (a->type) {
	case LSM_AUDIT_DATA_NONE:
		return;
	case LSM_AUDIT_DATA_IPC:
		audit_log_format(ab, " key=%d ", a->u.ipc_id);
		break;
	case LSM_AUDIT_DATA_CAP:
		audit_log_format(ab, " capability=%d ", a->u.cap);
		break;
	case LSM_AUDIT_DATA_PATH: {
		struct inode *inode;

		audit_log_d_path(ab, " path=", &a->u.path);

		inode = a->u.path.dentry->d_inode;
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_DENTRY: {
		struct inode *inode;

		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, a->u.dentry->d_name.name);

		inode = a->u.dentry->d_inode;
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_INODE: {
		struct dentry *dentry;
		struct inode *inode;

		inode = a->u.inode;
		dentry = d_find_alias(inode);
		if (dentry) {
			audit_log_format(ab, " name=");
			audit_log_untrustedstring(ab,
					 dentry->d_name.name);
			dput(dentry);
		}
		audit_log_format(ab, " dev=");
		audit_log_untrustedstring(ab, inode->i_sb->s_id);
		audit_log_format(ab, " ino=%lu", inode->i_ino);
		break;
	}
	case LSM_AUDIT_DATA_TASK:
		tsk = a->u.tsk;
		if (tsk && tsk->pid) {
			audit_log_format(ab, " pid=%d comm=", tsk->pid);
			audit_log_untrustedstring(ab, tsk->comm);
		}
		break;
	case LSM_AUDIT_DATA_NET:
		if (a->u.net.sk) {
			struct sock *sk = a->u.net.sk;
			struct unix_sock *u;
			int len = 0;
			char *p = NULL;

			switch (sk->sk_family) {
			case AF_INET: {
				struct inet_sock *inet = inet_sk(sk);

				print_ipv4_addr(ab, inet->inet_rcv_saddr,
						inet->inet_sport,
						"laddr", "lport");
				print_ipv4_addr(ab, inet->inet_daddr,
						inet->inet_dport,
						"faddr", "fport");
				break;
			}
			case AF_INET6: {
				struct inet_sock *inet = inet_sk(sk);
				struct ipv6_pinfo *inet6 = inet6_sk(sk);

				print_ipv6_addr(ab, &inet6->rcv_saddr,
						inet->inet_sport,
						"laddr", "lport");
				print_ipv6_addr(ab, &inet6->daddr,
						inet->inet_dport,
						"faddr", "fport");
				break;
			}
			case AF_UNIX:
				u = unix_sk(sk);
				if (u->dentry) {
					struct path path = {
						.dentry = u->dentry,
						.mnt = u->mnt
					};
					audit_log_d_path(ab, " path=", &path);
					break;
				}
				if (!u->addr)
					break;
				len = u->addr->len-sizeof(short);
				p = &u->addr->name->sun_path[0];
				audit_log_format(ab, " path=");
				if (*p)
					audit_log_untrustedstring(ab, p);
				else
					audit_log_n_hex(ab, p, len);
				break;
			}
		}

		switch (a->u.net.family) {
		case AF_INET:
			print_ipv4_addr(ab, a->u.net.v4info.saddr,
					a->u.net.sport,
					"saddr", "src");
			print_ipv4_addr(ab, a->u.net.v4info.daddr,
					a->u.net.dport,
					"daddr", "dest");
			break;
		case AF_INET6:
			print_ipv6_addr(ab, &a->u.net.v6info.saddr,
					a->u.net.sport,
					"saddr", "src");
			print_ipv6_addr(ab, &a->u.net.v6info.daddr,
					a->u.net.dport,
					"daddr", "dest");
			break;
		}
		if (a->u.net.netif > 0) {
			struct net_device *dev;

			/* NOTE: we always use init's namespace */
			dev = dev_get_by_index(&init_net, a->u.net.netif);
			if (dev) {
				audit_log_format(ab, " netif=%s", dev->name);
				dev_put(dev);
			}
		}
		break;
#ifdef CONFIG_KEYS
	case LSM_AUDIT_DATA_KEY:
		audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
		if (a->u.key_struct.key_desc) {
			audit_log_format(ab, " key_desc=");
			audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
		}
		break;
#endif
	case LSM_AUDIT_DATA_KMOD:
		audit_log_format(ab, " kmod=");
		audit_log_untrustedstring(ab, a->u.kmod_name);
		break;
	} /* switch (a->type) */
}

/**
 * common_lsm_audit - generic LSM auditing function
 * @a:  auxiliary audit data
 *
 * setup the audit buffer for common security information
 * uses callback to print LSM specific information
 */
void common_lsm_audit(struct common_audit_data *a)
{
	struct audit_buffer *ab;

	if (a == NULL)
		return;
	/* we use GFP_ATOMIC so we won't sleep */
	ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC);

	if (ab == NULL)
		return;

	if (a->lsm_pre_audit)
		a->lsm_pre_audit(ab, a);

	dump_common_audit_data(ab, a);

	if (a->lsm_post_audit)
		a->lsm_post_audit(ab, a);

	audit_log_end(ab);
}
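A hedged sketch of how a security module might drive the helper above, using the struct layout shown in this version (the tsk pointer and the lsm_pre_audit/lsm_post_audit callbacks); the function names, message text, and exact field types here are illustrative assumptions, not taken from any particular LSM:

static void example_pre_audit(struct audit_buffer *ab, void *data)
{
	/* Printed before the common "pid=... comm=..." fields. */
	audit_log_format(ab, "example_lsm: denied");
}

static void example_report_ipc_denial(int ipc_key)
{
	struct common_audit_data ad;

	memset(&ad, 0, sizeof(ad));
	ad.type = LSM_AUDIT_DATA_IPC;	/* selects the " key=" branch above */
	ad.u.ipc_id = ipc_key;
	ad.lsm_pre_audit = example_pre_audit;
	ad.lsm_post_audit = NULL;	/* nothing module-specific afterwards */

	common_lsm_audit(&ad);
}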
Example #23
int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev = NULL;
	struct ieee802154_mlme_ops *ops;
	struct ieee802154_mac_params params;
	struct wpan_phy *phy;
	int rc = -EINVAL;

	pr_debug("%s\n", __func__);

	dev = ieee802154_nl_get_dev(info);
	if (!dev)
		return -ENODEV;

	ops = ieee802154_mlme_ops(dev);

	if (!ops->get_mac_params || !ops->set_mac_params) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (netif_running(dev)) {
		rc = -EBUSY;
		goto out;
	}

	if (!info->attrs[IEEE802154_ATTR_LBT_ENABLED] &&
	    !info->attrs[IEEE802154_ATTR_CCA_MODE] &&
	    !info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL] &&
	    !info->attrs[IEEE802154_ATTR_CSMA_RETRIES] &&
	    !info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] &&
	    !info->attrs[IEEE802154_ATTR_CSMA_MAX_BE] &&
	    !info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
		goto out;

	phy = dev->ieee802154_ptr->wpan_phy;
	get_device(&phy->dev);

	rtnl_lock();
	ops->get_mac_params(dev, &params);

	if (info->attrs[IEEE802154_ATTR_TXPOWER])
		params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);

	if (info->attrs[IEEE802154_ATTR_LBT_ENABLED])
		params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);

	if (info->attrs[IEEE802154_ATTR_CCA_MODE])
		params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);

	if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
		params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);

	if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
		params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);

	if (info->attrs[IEEE802154_ATTR_CSMA_MIN_BE])
		params.min_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MIN_BE]);

	if (info->attrs[IEEE802154_ATTR_CSMA_MAX_BE])
		params.max_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]);

	if (info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
		params.frame_retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]);

	rc = ops->set_mac_params(dev, &params);
	rtnl_unlock();

	wpan_phy_put(phy);
	dev_put(dev);

	return 0;

out:
	dev_put(dev);
	return rc;
}
Example #24
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;
	bool needs_rtnl = setsockopt_needs_rtnl(optname);

	if (!optval)
		val = 0;
	else {
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else
			val = 0;
	}

	valbool = (val != 0);

	if (ip6_mroute_opt(optname))
		return ip6_mroute_setsockopt(sk, optname, optval, optlen);

	if (needs_rtnl)
		rtnl_lock();
	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_type == SOCK_RAW)
				break;

			if (sk->sk_protocol == IPPROTO_UDP ||
			    sk->sk_protocol == IPPROTO_UDPLITE) {
				struct udp_sock *up = udp_sk(sk);
				if (up->pending == AF_INET6) {
					retv = -EBUSY;
					break;
				}
			} else if (sk->sk_protocol != IPPROTO_TCP)
				break;

			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			/*
			 * Sock is moving from IPv6 to IPv4 (sk_prot), so
			 * remove it from the refcnt debug socks count in the
			 * original family...
			 */
			sk_refcnt_debug_dec(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct inet_connection_sock *icsk = inet_csk(sk);
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, &tcp_prot, 1);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				icsk->icsk_af_ops = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
			} else {
				struct proto *prot = &udp_prot;

				if (sk->sk_protocol == IPPROTO_UDPLITE)
					prot = &udplite_prot;
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, prot, 1);
				local_bh_enable();
				sk->sk_prot = prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg((__force struct ipv6_txoptions **)&np->opt,
				   NULL);
			if (opt) {
				atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
				txopt_put(opt);
			}
			pktopt = xchg(&np->pktoptions, NULL);
			kfree_skb(pktopt);

			sk->sk_destruct = inet_sock_destruct;
			/*
			 * ... and add it to the refcnt debug socks count
			 * in the new family. -acme
			 */
			sk_refcnt_debug_inc(sk);
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (optlen < sizeof(int) ||
		    inet_sk(sk)->inet_num)
			goto e_inval;
		sk->sk_ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_RECVPKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_2292PKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxoinfo = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxohlim = valbool;
		retv = 0;
		break;

	case IPV6_RECVRTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.srcrt = valbool;
		retv = 0;
		break;

	case IPV6_2292RTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.osrcrt = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.ohopopts = valbool;
		retv = 0;
		break;

	case IPV6_RECVDSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_2292DSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.odstopts = valbool;
		retv = 0;
		break;

	case IPV6_TCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < -1 || val > 0xff)
			goto e_inval;
		/* RFC 3542, 6.5: default traffic class of 0x0 */
		if (val == -1)
			val = 0;
		np->tclass = val;
		retv = 0;
		break;

	case IPV6_RECVTCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxtclass = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_RECVPATHMTU:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxpmtu = valbool;
		retv = 0;
		break;

	case IPV6_TRANSPARENT:
		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
			retv = -EPERM;
			break;
		}
		if (optlen < sizeof(int))
			goto e_inval;
		/* we don't have a separate transparent bit for IPv6; we use the one in the IPv4 socket */
		inet_sk(sk)->transparent = valbool;
		retv = 0;
		break;

	case IPV6_RECVORIGDSTADDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxorigdstaddr = valbool;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
	case IPV6_RTHDRDSTOPTS:
	case IPV6_RTHDR:
	case IPV6_DSTOPTS:
	{
		struct ipv6_txoptions *opt;

		/* remove any sticky options header with a zero option
		 * length, per RFC3542.
		 */
		if (optlen == 0)
			optval = NULL;
		else if (!optval)
			goto e_inval;
		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
			 optlen & 0x7 || optlen > 8 * 255)
			goto e_inval;

		/* hop-by-hop / destination options are privileged option */
		retv = -EPERM;
		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
			break;

		opt = rcu_dereference_protected(np->opt,
						lockdep_sock_is_held(sk));
		opt = ipv6_renew_options(sk, opt, optname,
					 (struct ipv6_opt_hdr __user *)optval,
					 optlen);
		if (IS_ERR(opt)) {
			retv = PTR_ERR(opt);
			break;
		}

		/* routing header option needs extra check */
		retv = -EINVAL;
		if (optname == IPV6_RTHDR && opt && opt->srcrt) {
			struct ipv6_rt_hdr *rthdr = opt->srcrt;
			switch (rthdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			case IPV6_SRCRT_TYPE_2:
				if (rthdr->hdrlen != 2 ||
				    rthdr->segments_left != 1)
					goto sticky_done;

				break;
#endif
			default:
				goto sticky_done;
			}
		}

		retv = 0;
		opt = ipv6_update_options(sk, opt);
sticky_done:
		if (opt) {
			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
			txopt_put(opt);
		}
		break;
	}

	case IPV6_PKTINFO:
	{
		struct in6_pktinfo pkt;

		if (optlen == 0)
			goto e_inval;
		else if (optlen < sizeof(struct in6_pktinfo) || !optval)
			goto e_inval;

		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
			retv = -EFAULT;
			break;
		}
		if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
			goto e_inval;

		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
		np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
		retv = 0;
		break;
	}

	case IPV6_2292PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi6 fl6;
		struct sockcm_cookie sockc_junk;
		struct ipcm6_cookie ipc6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.flowi6_mark = sk->sk_mark;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (!opt)
			break;

		memset(opt, 0, sizeof(*opt));
		atomic_set(&opt->refcnt, 1);
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void *)(opt+1);
		ipc6.opt = opt;

		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk);
		if (retv)
			goto done;
update:
		retv = 0;
		opt = ipv6_update_options(sk, opt);
done:
		if (opt) {
			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
			txopt_put(opt);
		}
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val != valbool)
			goto e_inval;
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			np->ucast_oif = 0;
			retv = 0;
			break;
		}

		dev = dev_get_by_index(net, ifindex);
		retv = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		retv = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		np->ucast_oif = ifindex;
		retv = 0;
		break;
	}

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;

		if (val) {
			struct net_device *dev;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
				goto e_inval;

			dev = dev_get_by_index(net, val);
			if (!dev) {
				retv = -ENODEV;
				break;
			}
			dev_put(dev);
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		if (optlen < sizeof(struct group_req))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen < sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
						 &psin6->sin6_addr);
			/* prior join w/ different source is ok */
			if (retv && retv != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (!gsf) {
			retv = -ENOBUFS;
			break;
		}
		retv = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			kfree(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);

		break;
	}
	case IPV6_ROUTER_ALERT:
		if (optlen < sizeof(int))
			goto e_inval;
		retv = ip6_ra_control(sk, val);
		break;
	case IPV6_MTU_DISCOVER:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		if (optlen < sizeof(int))
			goto e_inval;
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IPV6_ADDR_PREFERENCES:
	    {
		unsigned int pref = 0;
		unsigned int prefmask = ~0;

		if (optlen < sizeof(int))
			goto e_inval;

		retv = -EINVAL;

		/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
		switch (val & (IPV6_PREFER_SRC_PUBLIC|
			       IPV6_PREFER_SRC_TMP|
			       IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
		case IPV6_PREFER_SRC_PUBLIC:
			pref |= IPV6_PREFER_SRC_PUBLIC;
			break;
		case IPV6_PREFER_SRC_TMP:
			pref |= IPV6_PREFER_SRC_TMP;
			break;
		case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
			break;
		case 0:
			goto pref_skip_pubtmp;
		default:
			goto e_inval;
		}

		prefmask &= ~(IPV6_PREFER_SRC_PUBLIC|
			      IPV6_PREFER_SRC_TMP);
pref_skip_pubtmp:

		/* check HOME/COA conflicts */
		switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) {
		case IPV6_PREFER_SRC_HOME:
			break;
		case IPV6_PREFER_SRC_COA:
			pref |= IPV6_PREFER_SRC_COA;
		case 0:
			goto pref_skip_coa;
		default:
			goto e_inval;
		}

		prefmask &= ~IPV6_PREFER_SRC_COA;
pref_skip_coa:

		/* check CGA/NONCGA conflicts */
		switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
		case IPV6_PREFER_SRC_CGA:
		case IPV6_PREFER_SRC_NONCGA:
		case 0:
			break;
		default:
			goto e_inval;
		}

		np->srcprefs = (np->srcprefs & prefmask) | pref;
		retv = 0;

		break;
	    }
	case IPV6_MINHOPCOUNT:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		np->min_hopcount = val;
		retv = 0;
		break;
	case IPV6_DONTFRAG:
		np->dontfrag = valbool;
		retv = 0;
		break;
	case IPV6_AUTOFLOWLABEL:
		np->autoflowlabel = valbool;
		retv = 0;
		break;
	}

	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();

	return retv;

e_inval:
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return -EINVAL;
}
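From user space these cases are reached through plain setsockopt(2) on an AF_INET6 socket; a minimal sketch (the values are arbitrary examples) exercising the IPV6_V6ONLY and IPV6_UNICAST_HOPS branches handled above:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int on = 1;
	int hops = 64;	/* handled by the IPV6_UNICAST_HOPS case above */

	if (fd < 0)
		return 1;

	/* Must be set before the socket is bound (the inet_num check above). */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)) < 0)
		perror("IPV6_V6ONLY");

	if (setsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, &hops, sizeof(hops)) < 0)
		perror("IPV6_UNICAST_HOPS");

	return 0;
}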
Example #25
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < sizeof(*addr))
		return -EINVAL;

	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev, sk);
					dev_put(dev);
				}
			} else
				raw_disable_allfilters(NULL, sk);
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

	return err;
}
Example #26
static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
{
	struct mpoa_client *mpc;
	struct lec_priv *priv;
	int err;

	if (mpcs == NULL) {
		init_timer(&mpc_timer);
		mpc_timer_refresh();

		/* This lets us know how our LECs are doing */
		err = register_netdevice_notifier(&mpoa_notifier);
		if (err < 0) {
			del_timer(&mpc_timer);
			return err;
		}
	}

	mpc = find_mpc_by_itfnum(arg);
	if (mpc == NULL) {
		dprintk("allocating new mpc for itf %d\n", arg);
		mpc = alloc_mpc();
		if (mpc == NULL)
			return -ENOMEM;
		mpc->dev_num = arg;
		mpc->dev = find_lec_by_itfnum(arg);
					/* NULL if there was no lec */
	}
	if (mpc->mpoad_vcc) {
		pr_info("mpoad is already present for itf %d\n", arg);
		return -EADDRINUSE;
	}

	if (mpc->dev) { /* check if the lec is LANE2 capable */
		priv = netdev_priv(mpc->dev);
		if (priv->lane_version < 2) {
			dev_put(mpc->dev);
			mpc->dev = NULL;
		} else
			priv->lane2_ops->associate_indicator = lane2_assoc_ind;
	}

	mpc->mpoad_vcc = vcc;
	vcc->dev = &mpc_dev;
	vcc_insert_socket(sk_atm(vcc));
	set_bit(ATM_VF_META, &vcc->flags);
	set_bit(ATM_VF_READY, &vcc->flags);

	if (mpc->dev) {
		char empty[ATM_ESA_LEN];
		memset(empty, 0, ATM_ESA_LEN);

		start_mpc(mpc, mpc->dev);
		/* set address if mpcd e.g. gets killed and restarted.
		 * If we do not do it now we have to wait for the next LE_ARP
		 */
		if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
			send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
	}

	__module_get(THIS_MODULE);
	return arg;
}
Example #27
static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else
		ifindex = ro->ifindex;

	if (size != sizeof(struct can_frame))
		return -EINVAL;

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
				  &err);
	if (!skb)
		goto put_dev;

	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
	if (err < 0)
		goto free_skb;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto free_skb;

	/* to be able to check the received tx sock reference in raw_rcv() */
	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;

	skb->dev = dev;
	skb->sk  = sk;

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
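The raw_bind()/raw_sendmsg() pair above is what an ordinary SocketCAN application hits with bind() followed by write(); a minimal user-space sketch, assuming an interface named "can0" and with most error reporting omitted:

#include <linux/can.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	struct sockaddr_can addr;
	struct ifreq ifr;
	struct can_frame frame;

	if (fd < 0)
		return 1;

	/* Resolve the ifindex that raw_bind() checks against ARPHRD_CAN. */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* raw_sendmsg() insists on exactly sizeof(struct can_frame) bytes. */
	memset(&frame, 0, sizeof(frame));
	frame.can_id  = 0x123;
	frame.can_dlc = 2;
	frame.data[0] = 0xde;
	frame.data[1] = 0xad;
	if (write(fd, &frame, sizeof(frame)) != sizeof(frame))
		return 1;

	close(fd);
	return 0;
}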
Example #28
static unsigned int route_oif(const struct ipt_route_target_info *route_info,
			      struct sk_buff *skb) 
{
	unsigned int ifindex = 0;
	struct net_device *dev_out = NULL;

	/* The user set the interface name to use.
	 * Get the current interface index.
	 */
	if ((dev_out = dev_get_by_name(route_info->oif))) {
		ifindex = dev_out->ifindex;
	} else {
		/* Unknown interface name: packet dropped */
		if (net_ratelimit())
			DEBUGP("ipt_ROUTE: oif interface %s not found\n", route_info->oif);
		return NF_DROP;
	}

	/* Trying the standard way of routing packets */
	switch (route(skb, ifindex, route_info)) {
	case 1:
		dev_put(dev_out);
		if (route_info->flags & IPT_ROUTE_CONTINUE)
			return IPT_CONTINUE;

		ip_direct_send(skb);
		return NF_STOLEN;

	case 0:
		/* Failed to send to oif. Trying the hard way */
		if (route_info->flags & IPT_ROUTE_CONTINUE)
			return NF_DROP;

		if (net_ratelimit()) 
			DEBUGP("ipt_ROUTE: forcing the use of %i\n",
			       ifindex);

		/* We have to force the use of an interface.
		 * This interface must be a tunnel interface since
		 * otherwise we can't guess the hw address for
		 * the packet. For a tunnel interface, no hw address
		 * is needed.
		 */
		if ((dev_out->type != ARPHRD_TUNNEL)
		    && (dev_out->type != ARPHRD_IPGRE)) {
			if (net_ratelimit()) 
				DEBUGP("ipt_ROUTE: can't guess the hw addr !\n");
			dev_put(dev_out);
			return NF_DROP;
		}
	
		/* Send the packet. This will also free the skb.
		 * Do not go through the POST_ROUTING hook because
		 * skb->dst is not set and because it will probably
		 * get confused by the destination IP address.
		 */
		skb->dev = dev_out;
		dev_direct_send(skb);
		dev_put(dev_out);
		return NF_STOLEN;
		
	default:
		/* Unexpected error */
		dev_put(dev_out);
		return NF_DROP;
	}
}
Example #29
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct l2tp_eth *priv;
	struct l2tp_eth_sess *spriv;
	int rc;
	struct l2tp_eth_net *pn;

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel) {
		rc = -ENODEV;
		goto out;
	}

	session = l2tp_session_find(net, tunnel, session_id);
	if (session) {
		rc = -EEXIST;
		goto out;
	}

	if (cfg->ifname) {
		dev = dev_get_by_name(net, cfg->ifname);
		if (dev) {
			dev_put(dev);
			rc = -EEXIST;
			goto out;
		}
		strlcpy(name, cfg->ifname, IFNAMSIZ);
	} else
		strcpy(name, L2TP_ETH_DEV_NAME);

	session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
				      peer_session_id, cfg);
	if (!session) {
		rc = -ENOMEM;
		goto out;
	}

	dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
	if (!dev) {
		rc = -ENOMEM;
		goto out_del_session;
	}

	dev_net_set(dev, net);
	if (session->mtu == 0)
		session->mtu = dev->mtu - session->hdr_len;
	dev->mtu = session->mtu;
	dev->needed_headroom += session->hdr_len;

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->session = session;
	INIT_LIST_HEAD(&priv->list);

	priv->tunnel_sock = tunnel->sock;
	session->recv_skb = l2tp_eth_dev_recv;
	session->session_close = l2tp_eth_delete;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show = l2tp_eth_show;
#endif

	spriv = l2tp_session_priv(session);
	spriv->dev = dev;

	rc = register_netdev(dev);
	if (rc < 0)
		goto out_del_dev;

	__module_get(THIS_MODULE);
	/* Must be done after register_netdev() */
	strlcpy(session->ifname, dev->name, IFNAMSIZ);

	dev_hold(dev);
	pn = l2tp_eth_pernet(dev_net(dev));
	spin_lock(&pn->l2tp_eth_lock);
	list_add(&priv->list, &pn->l2tp_eth_dev_list);
	spin_unlock(&pn->l2tp_eth_lock);

	return 0;

out_del_dev:
	free_netdev(dev);
	spriv->dev = NULL;
out_del_session:
	l2tp_session_delete(session);
out:
	return rc;
}
Example #30
int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr)
{
	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	int err;
#ifdef CONFIG_IPV6_MLD6_DEBUG
	char abuf[128];
	in6_ntop(addr, abuf);

	MDBG3((KERN_DEBUG
		"ipv6_sock_mc_join(sk=%p, ifindex=%d, addr=%s)\n",
		sk, ifindex, abuf));
#endif

	if (!(ipv6_addr_type(addr) & IPV6_ADDR_MULTICAST))
		return -EINVAL;

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	if (mc_lst == NULL)
		return -ENOMEM;

	mc_lst->next = NULL;
	ipv6_addr_copy(&mc_lst->addr, addr);

	if (ifindex == 0) {
		struct rt6_info *rt;
		rt = rt6_lookup(addr, NULL, 0, 0);
		if (rt) {
			dev = rt->rt6i_dev;
			dev_hold(dev);
			dst_release(&rt->u.dst);
		}
	} else
		dev = dev_get_by_index(ifindex);

	if (dev == NULL) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;

	/*
	 *	now add/increase the group membership on the device
	 */

	err = ipv6_dev_mc_inc(dev, addr);

	if (err) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		dev_put(dev);
		return err;
	}

	write_lock_bh(&ipv6_sk_mc_lock);
	mc_lst->next = np->ipv6_mc_list;
	np->ipv6_mc_list = mc_lst;
	write_unlock_bh(&ipv6_sk_mc_lock);

	dev_put(dev);

	return 0;
}
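ipv6_sock_mc_join() above is what the IPV6_ADD_MEMBERSHIP (IPV6_JOIN_GROUP in user space) socket option eventually calls; a hedged user-space sketch, where the group address ff02::1234 and the interface index are placeholder values:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Join ff02::1234 on the given interface; returns 0 on success. */
static int join_v6_group(int fd, unsigned int ifindex)
{
	struct ipv6_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	if (inet_pton(AF_INET6, "ff02::1234", &mreq.ipv6mr_multiaddr) != 1)
		return -1;
	mreq.ipv6mr_interface = ifindex;	/* 0 lets the kernel pick via a route lookup */

	return setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP,
			  &mreq, sizeof(mreq));
}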