Example #1
/**
 * Sets up a generic netlink connection with the kernel module and retrieves
 * the associated family id.
 *
 * @return 0 on success, a non-zero error code otherwise
 */
static int initialize_netlink_family(void) {
	struct nl_handle* hndl;
	struct nl_msg* msg = NULL;
	struct nl_msg *ans_msg = NULL;
	struct nlmsghdr *nl_hdr;
	struct genlmsghdr* genl_hdr;
	struct nlattr *nla;
	int ret_val = 0;

	hndl = nl_handle_alloc();
	if (hndl == NULL)
		return -ENOMEM;

	nl_set_buffer_size(hndl, 15000000, 15000000);
	
	//nl_handle_set_peer_pid(hndl, 0);
	//nl_set_passcred(hndl, 1);
	nl_disable_sequence_check(hndl);

	if ( (ret_val=nl_connect(hndl, NETLINK_GENERIC)) )
		goto init_return;
	
	nl_set_buffer_size(hndl, 15000000, 15000000);
  
	if ( (ret_val=prepare_request_message(hndl, CTRL_CMD_GETFAMILY, GENL_ID_CTRL, &msg) ) != 0 ) {
		goto init_return;
  	}
  
	ret_val = nla_put_string(msg,
		   CTRL_ATTR_FAMILY_NAME,
		   "DIRECTORCHNL");

  	if (ret_val != 0)
		goto init_return;

	if ( (ret_val = send_request_message(hndl, msg, 0) ) != 0 )
		goto init_return;
	if ( (ret_val = read_message(hndl, &ans_msg) ) != 0 )
		goto init_return;

	genl_hdr = nl_msg_genlhdr(ans_msg);
 	if (genl_hdr == NULL || genl_hdr->cmd != CTRL_CMD_NEWFAMILY) {
    		ret_val = -EBADMSG;
    		goto init_return;
  	}

	nla = nlmsg_find_attr(nlmsg_hdr(ans_msg), sizeof(struct genlmsghdr), CTRL_ATTR_FAMILY_ID);
  	if (nla == NULL) {
    		ret_val = -EBADMSG;
    		goto init_return;
  	}

  	state.gnl_fid = nla_get_u16(nla);  
  	if (state.gnl_fid == 0) {
    		ret_val = -EBADMSG;
    		goto init_return;
	}

	state.handle = hndl;

	/* the reply has been fully parsed; release it before returning */
	nlmsg_free(ans_msg);

	return 0;

init_return:
	nlmsg_free(ans_msg);

	if (state.handle == NULL) {
		nl_close(hndl);
		nl_handle_destroy(hndl);
	}

	return ret_val;
}
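
Note: Example #1 builds the CTRL_CMD_GETFAMILY request by hand with the old libnl-1 API. On systems with libnl-3, the same family-id lookup can be done with genl_ctrl_resolve(); the following is only a minimal sketch under that assumption (same "DIRECTORCHNL" family name as above, hypothetical resolve_family_libnl3() helper, error handling trimmed):

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Minimal libnl-3 sketch: resolve a generic netlink family id by name. */
static int resolve_family_libnl3(void)
{
	struct nl_sock *sock;
	int family, err;

	sock = nl_socket_alloc();
	if (sock == NULL)
		return -ENOMEM;

	err = genl_connect(sock);	/* binds the socket to NETLINK_GENERIC */
	if (err < 0) {
		nl_socket_free(sock);
		return err;
	}

	family = genl_ctrl_resolve(sock, "DIRECTORCHNL");
	nl_socket_free(sock);

	return family;	/* >= 0: family id, < 0: libnl error code */
}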
Example #2
static int ieee802154_dump_phy(struct sk_buff *skb,
	struct netlink_callback *cb)
{
	struct dump_phy_data data = {
		.cb = cb,
		.skb = skb,
		.s_idx = cb->args[0],
		.idx = 0,
	};

	pr_debug("%s\n", __func__);

	wpan_phy_for_each(ieee802154_dump_phy_iter, &data);

	cb->args[0] = data.idx;

	return skb->len;
}

static int ieee802154_add_iface(struct sk_buff *skb,
		struct genl_info *info)
{
	struct sk_buff *msg;
	struct wpan_phy *phy;
	const char *name;
	const char *devname;
	int rc = -ENOBUFS;
	struct net_device *dev;
	int type = __IEEE802154_DEV_INVALID;

	pr_debug("%s\n", __func__);

	if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
	if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
		return -EINVAL; /* phy name should be null-terminated */

	if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
		devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
		if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
				!= '\0')
			return -EINVAL; /* device name should be null-terminated */
	} else {
		devname = "wpan%d";
	}

	if (strlen(devname) >= IFNAMSIZ)
		return -ENAMETOOLONG;

	phy = wpan_phy_find(name);
	if (!phy)
		return -ENODEV;

	msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE);
	if (!msg)
		goto out_dev;

	if (!phy->add_iface) {
		rc = -EINVAL;
		goto nla_put_failure;
	}

	if (info->attrs[IEEE802154_ATTR_HW_ADDR] &&
	    nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) !=
			IEEE802154_ADDR_LEN) {
		rc = -EINVAL;
		goto nla_put_failure;
	}

	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
		if (type >= __IEEE802154_DEV_MAX) {
			rc = -EINVAL;
			goto nla_put_failure;	/* frees msg and drops the phy reference */
		}
	}

	dev = phy->add_iface(phy, devname, type);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto nla_put_failure;
	}

	if (info->attrs[IEEE802154_ATTR_HW_ADDR]) {
		struct sockaddr addr;

		addr.sa_family = ARPHRD_IEEE802154;
		nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
				IEEE802154_ADDR_LEN);

		/*
		 * strangely enough, some callbacks (inetdev_event) from
		 * dev_set_mac_address require RTNL_LOCK
		 */
		rtnl_lock();
		rc = dev_set_mac_address(dev, &addr);
		rtnl_unlock();
		if (rc)
			goto dev_unregister;
	}

	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
		goto nla_put_failure;
	dev_put(dev);

	wpan_phy_put(phy);

	return ieee802154_nl_reply(msg, info);

dev_unregister:
	rtnl_lock(); /* del_iface must be called with RTNL lock */
	phy->del_iface(phy, dev);
	dev_put(dev);
	rtnl_unlock();
nla_put_failure:
	nlmsg_free(msg);
out_dev:
	wpan_phy_put(phy);
	return rc;
}

static int ieee802154_del_iface(struct sk_buff *skb,
		struct genl_info *info)
{
	struct sk_buff *msg;
	struct wpan_phy *phy;
	const char *name;
	int rc;
	struct net_device *dev;

	pr_debug("%s\n", __func__);

	if (!info->attrs[IEEE802154_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
	if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0')
		return -EINVAL; /* name should be null-terminated */

	dev = dev_get_by_name(genl_info_net(info), name);
	if (!dev)
		return -ENODEV;

	phy = ieee802154_mlme_ops(dev)->get_phy(dev);
	BUG_ON(!phy);

	rc = -EINVAL;
	/* phy name is optional, but should be checked if it's given */
	if (info->attrs[IEEE802154_ATTR_PHY_NAME]) {
		struct wpan_phy *phy2;

		const char *pname =
			nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
		if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1]
				!= '\0')
			/* name should be null-terminated */
			goto out_dev;

		phy2 = wpan_phy_find(pname);
		if (!phy2)
			goto out_dev;

		if (phy != phy2) {
			wpan_phy_put(phy2);
			goto out_dev;
		}
	}

	rc = -ENOBUFS;

	msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE);
	if (!msg)
		goto out_dev;

	if (!phy->del_iface) {
		rc = -EINVAL;
		goto nla_put_failure;
	}

	rtnl_lock();
	phy->del_iface(phy, dev);

	/* We don't have device anymore */
	dev_put(dev);
	dev = NULL;

	rtnl_unlock();

	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
		goto nla_put_failure;
	wpan_phy_put(phy);

	return ieee802154_nl_reply(msg, info);

nla_put_failure:
	nlmsg_free(msg);
out_dev:
	wpan_phy_put(phy);
	if (dev)
		dev_put(dev);

	return rc;
}

static struct genl_ops ieee802154_phy_ops[] = {
	IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy,
							ieee802154_dump_phy),
	IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
	IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
};

/*
 * No need to unregister as family unregistration will do it.
 */
int nl802154_phy_register(void)
{
	int i;
	int rc;

	for (i = 0; i < ARRAY_SIZE(ieee802154_phy_ops); i++) {
		rc = genl_register_ops(&nl802154_family,
				&ieee802154_phy_ops[i]);
		if (rc)
			return rc;
	}

	return 0;
}
Example #3
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	struct nlattr *attr;
	void *info = NULL;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (icsk == NULL) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk),
			skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req_v2 *req,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family	      = tw->tw_family;
	r->idiag_retrans      = 0;
	r->id.idiag_if	      = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);
	r->id.idiag_sport     = tw->tw_sport;
	r->id.idiag_dport     = tw->tw_dport;
	r->id.idiag_src[0]    = tw->tw_rcv_saddr;
	r->id.idiag_dst[0]    = tw->tw_daddr;
	r->idiag_state	      = tw->tw_substate;
	r->idiag_timer	      = 3;
	r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue	      = 0;
	r->idiag_wqueue	      = 0;
	r->idiag_uid	      = 0;
	r->idiag_inode	      = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif

	return nlmsg_end(skb, nlh);
}
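
For context, the inet_diag_msg records that inet_sk_diag_fill() and inet_twsk_diag_fill() build are what a NETLINK_SOCK_DIAG dump request receives in userspace. Below is a minimal sketch of such a request (hypothetical request_tcp_dump() helper; TCP over IPv4, all socket states; the reply-parsing loop is omitted):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

/* Minimal sketch: ask the kernel to dump all TCP/IPv4 sockets. */
static int request_tcp_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg;
	int fd, rc;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_INET;
	msg.req.sdiag_protocol = IPPROTO_TCP;
	msg.req.idiag_states = ~0U;	/* all TCP states */

	rc = send(fd, &msg, sizeof(msg), 0);

	/* recv() loop parsing the returned struct inet_diag_msg records goes here */

	close(fd);
	return rc < 0 ? -1 : 0;
}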
Example #4
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct dump_phy_data data = {
		.cb = cb,
		.skb = skb,
		.s_idx = cb->args[0],
		.idx = 0,
	};

	pr_debug("%s\n", __func__);

	wpan_phy_for_each(ieee802154_dump_phy_iter, &data);

	cb->args[0] = data.idx;

	return skb->len;
}

int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct wpan_phy *phy;
	const char *name;
	const char *devname;
	int rc = -ENOBUFS;
	struct net_device *dev;
	int type = __IEEE802154_DEV_INVALID;
	unsigned char name_assign_type;

	pr_debug("%s\n", __func__);

	if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
	if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
		return -EINVAL; /* phy name should be null-terminated */

	if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
		devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
		if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
				!= '\0')
			return -EINVAL; /* device name should be null-terminated */
		name_assign_type = NET_NAME_USER;
	} else {
		devname = "wpan%d";
		name_assign_type = NET_NAME_ENUM;
	}

	if (strlen(devname) >= IFNAMSIZ)
		return -ENAMETOOLONG;

	phy = wpan_phy_find(name);
	if (!phy)
		return -ENODEV;

	msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE);
	if (!msg)
		goto out_dev;

	if (info->attrs[IEEE802154_ATTR_HW_ADDR] &&
	    nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) !=
			IEEE802154_ADDR_LEN) {
		rc = -EINVAL;
		goto nla_put_failure;
	}

	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
		if (type >= __IEEE802154_DEV_MAX) {
			rc = -EINVAL;
			goto nla_put_failure;
		}
	}

	dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname,
					       name_assign_type, type);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto nla_put_failure;
	}
	dev_hold(dev);

	if (info->attrs[IEEE802154_ATTR_HW_ADDR]) {
		struct sockaddr addr;

		addr.sa_family = ARPHRD_IEEE802154;
		nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
			   IEEE802154_ADDR_LEN);

		/* strangely enough, some callbacks (inetdev_event) from
		 * dev_set_mac_address require RTNL_LOCK
		 */
		rtnl_lock();
		rc = dev_set_mac_address(dev, &addr);
		rtnl_unlock();
		if (rc)
			goto dev_unregister;
	}

	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
		goto nla_put_failure;
	dev_put(dev);

	wpan_phy_put(phy);

	return ieee802154_nl_reply(msg, info);

dev_unregister:
	rtnl_lock(); /* del_iface must be called with RTNL lock */
	rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);
	dev_put(dev);
	rtnl_unlock();
nla_put_failure:
	nlmsg_free(msg);
out_dev:
	wpan_phy_put(phy);
	return rc;
}

int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct wpan_phy *phy;
	const char *name;
	int rc;
	struct net_device *dev;

	pr_debug("%s\n", __func__);

	if (!info->attrs[IEEE802154_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
	if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0')
		return -EINVAL; /* name should be null-terminated */

	dev = dev_get_by_name(genl_info_net(info), name);
	if (!dev)
		return -ENODEV;

	phy = dev->ieee802154_ptr->wpan_phy;
	BUG_ON(!phy);
	get_device(&phy->dev);

	rc = -EINVAL;
	/* phy name is optional, but should be checked if it's given */
	if (info->attrs[IEEE802154_ATTR_PHY_NAME]) {
		struct wpan_phy *phy2;

		const char *pname =
			nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
		if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1]
				!= '\0')
			/* name should be null-terminated */
			goto out_dev;

		phy2 = wpan_phy_find(pname);
		if (!phy2)
			goto out_dev;

		if (phy != phy2) {
			wpan_phy_put(phy2);
			goto out_dev;
		}
	}

	rc = -ENOBUFS;

	msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE);
	if (!msg)
		goto out_dev;

	rtnl_lock();
	rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);

	/* We don't have device anymore */
	dev_put(dev);
	dev = NULL;

	rtnl_unlock();

	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
		goto nla_put_failure;
	wpan_phy_put(phy);

	return ieee802154_nl_reply(msg, info);

nla_put_failure:
	nlmsg_free(msg);
out_dev:
	wpan_phy_put(phy);
	if (dev)
		dev_put(dev);

	return rc;
}
Example #5
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net	= dev_net(skb->dev),
		.in	= skb->dev,
		.hook	= ipt->tcfi_hook,
		.pf	= NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.state    = &state;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}

static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* For simple targets, kernel size == user size and the user name equals
	 * the target name; to be foolproof you should not assume this.
	 */

	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = ipt->tcf_bindcnt - bind;
	c.refcnt = ipt->tcf_refcnt - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	kfree(t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}
Example #6
static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
				struct l2tp_session *session, u8 cmd)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
			session->peer_session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
	    nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
	    (session->mru &&
	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
		goto nla_put_failure;

	if ((session->ifname[0] &&
	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
	    (session->cookie_len &&
	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
		     &session->cookie[0])) ||
	    (session->peer_cookie_len &&
	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
		     &session->peer_cookie[0])) ||
	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
#ifdef CONFIG_XFRM
	    (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
#endif
	    (session->reorder_timeout &&
	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
			   session->reorder_timeout, L2TP_ATTR_PAD)))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
			      atomic_long_read(&session->stats.tx_packets),
			      L2TP_ATTR_STATS_PAD) ||
	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
			      atomic_long_read(&session->stats.tx_bytes),
			      L2TP_ATTR_STATS_PAD) ||
	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
			      atomic_long_read(&session->stats.tx_errors),
			      L2TP_ATTR_STATS_PAD) ||
	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
			      atomic_long_read(&session->stats.rx_packets),
			      L2TP_ATTR_STATS_PAD) ||
	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
			      atomic_long_read(&session->stats.rx_bytes),
			      L2TP_ATTR_STATS_PAD) ||
	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
			      atomic_long_read(&session->stats.rx_seq_discards),
			      L2TP_ATTR_STATS_PAD) ||
	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
			      atomic_long_read(&session->stats.rx_oos_packets),
			      L2TP_ATTR_STATS_PAD) ||
	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
			      atomic_long_read(&session->stats.rx_errors),
			      L2TP_ATTR_STATS_PAD))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	genlmsg_end(skb, hdr);
	return 0;

 nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
Example #7
int
crn_create_veth_pair(char *name1, char *name2) {
	struct nl_handler nlh;
	struct nlmsg *nlmsg = NULL, *answer = NULL;
	struct link_req *link_req;
	struct rtattr *nest1, *nest2, *nest3;
	int len, err;

	err = netlink_open(&nlh, NETLINK_ROUTE);
	if (err)
		return err;

	err = -EINVAL;
	len = strlen(name1);
	if (len == 1 || len >= IFNAMSIZ)
		goto out;

	len = strlen(name2);
	if (len == 1 || len >= IFNAMSIZ)
		goto out;

	err = -ENOMEM;
	nlmsg = nlmsg_alloc(NLMSG_GOOD_SIZE);
	if (!nlmsg)
		goto out;

	answer = nlmsg_alloc(NLMSG_GOOD_SIZE);
	if (!answer)
		goto out;

	link_req = (struct link_req *)nlmsg;
	link_req->ifinfomsg.ifi_family = AF_UNSPEC;
	nlmsg->nlmsghdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	nlmsg->nlmsghdr.nlmsg_flags =
		NLM_F_REQUEST|NLM_F_CREATE|NLM_F_EXCL|NLM_F_ACK;
	nlmsg->nlmsghdr.nlmsg_type = RTM_NEWLINK;

	err = -EINVAL;
	nest1 = nla_begin_nested(nlmsg, IFLA_LINKINFO);
	if (!nest1)
		goto out;

	if (nla_put_string(nlmsg, IFLA_INFO_KIND, "veth"))
		goto out;

	nest2 = nla_begin_nested(nlmsg, IFLA_INFO_DATA);
	if (!nest2)
		goto out;

	nest3 = nla_begin_nested(nlmsg, VETH_INFO_PEER);
	if (!nest3)
		goto out;

	nlmsg->nlmsghdr.nlmsg_len += sizeof(struct ifinfomsg);

	if (nla_put_string(nlmsg, IFLA_IFNAME, name2))
		goto out;

	nla_end_nested(nlmsg, nest3);

	nla_end_nested(nlmsg, nest2);

	nla_end_nested(nlmsg, nest1);

	if (nla_put_string(nlmsg, IFLA_IFNAME, name1))
		goto out;

	err = netlink_transaction(&nlh, nlmsg, answer);
out:
	netlink_close(&nlh);
	nlmsg_free(answer);
	nlmsg_free(nlmsg);
	return err;
}
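
As an aside, libnl-route-3 ships a helper that wraps the same RTM_NEWLINK/VETH_INFO_PEER construction shown above. The following is only a minimal sketch assuming that library's rtnl_link_veth_add() (hypothetical create_veth_pair_libnl3() wrapper; both ends are created in the caller's network namespace):

#include <unistd.h>
#include <netlink/netlink.h>
#include <netlink/route/link.h>
#include <netlink/route/link/veth.h>

/* Minimal libnl-route-3 sketch: create a veth pair name1 <-> name2. */
static int create_veth_pair_libnl3(const char *name1, const char *name2)
{
	struct nl_sock *sock;
	int err;

	sock = nl_socket_alloc();
	if (sock == NULL)
		return -NLE_NOMEM;

	err = nl_connect(sock, NETLINK_ROUTE);
	if (err == 0)
		err = rtnl_link_veth_add(sock, name1, name2, getpid());

	nl_socket_free(sock);
	return err;	/* 0 on success, negative libnl error code otherwise */
}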
Example #8
int demo_cmd(struct sk_buff *skb_2, struct genl_info *info)
{
	struct nlattr *na;
	struct sk_buff *skb = NULL;
	int rc = 0;
	void *msg_head;
	char *attr_str;
	u16 attr_u16;
	struct attr_custom cp;

	printk("got demo_cmd\n");

	if (info == NULL) {
		goto out;
	}

	na = info->attrs[DEMO_ATTR1_STRING];
	if (na) {
		attr_str = (char *)nla_data(na);
		if (attr_str == NULL) {
			printk("error while receiving data\n");
		}
		else {
			printk("attr1: %s\n", attr_str);
		}
	}
	else {
		printk("no attr1\n");
	}

	na = info->attrs[DEMO_ATTR2_UINT16];
	if (na) {
		attr_u16 = nla_get_u16(na);
		printk("attr2: %x\n", attr_u16);
	}
	else {
		printk("no attr2\n");
	}

	/* send message back */
	/* allocate some memory; since the final size is not yet known, use NLMSG_GOODSIZE */
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		goto out;
	}

	/* create the message */
	msg_head =
	    genlmsg_put(skb, 0, info->snd_seq + 1, &demo_gnl_family, 0,
			DEMO_CMD);

	if (msg_head == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	rc |= nla_put_string(skb, DEMO_ATTR1_STRING,"world");
	rc |= nla_put_u16(skb, DEMO_ATTR2_UINT16, 0x1f);
	cp.a = 1;
	cp.b = 2;
	cp.c = 3.0;
	cp.d = 4.0;
	rc |= nla_put(skb, DEMO_ATTR3_CUSTOM, sizeof(struct attr_custom), &cp);

	if (rc != 0) {
		goto out;
	}

	/* finalize the message */
	genlmsg_end(skb, msg_head);

	/* send the message back; genlmsg_unicast consumes the skb even on failure */
	rc = genlmsg_unicast(&init_net, skb, info->snd_portid);
	skb = NULL;

	if (rc != 0) {
		goto out;
	}

	return 0;

 out:
	printk("an error occurred\n");
	nlmsg_free(skb);	/* safe to call with a NULL skb */
	return -1;
}
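
The demo_gnl_family used above has to be defined and registered elsewhere in the module before demo_cmd() can be dispatched. Below is a minimal sketch of what that might look like on a recent kernel; the "DEMO_FAMILY" name, DEMO_ATTR_MAX, and the demo_init()/demo_exit() names are assumptions, and the attribute-policy hookup (not shown) differs across kernel versions (per-op before v5.2, per-family afterwards):

#include <linux/module.h>
#include <net/genetlink.h>

/* Minimal sketch: operations table and family registration for demo_cmd(). */
static const struct genl_ops demo_gnl_ops[] = {
	{
		.cmd  = DEMO_CMD,
		.doit = demo_cmd,	/* handler shown above */
	},
};

static struct genl_family demo_gnl_family = {
	.name    = "DEMO_FAMILY",	/* placeholder family name */
	.version = 1,
	.maxattr = DEMO_ATTR_MAX,	/* assumed highest attribute id */
	.module  = THIS_MODULE,
	.ops     = demo_gnl_ops,
	.n_ops   = ARRAY_SIZE(demo_gnl_ops),
};

static int __init demo_init(void)
{
	return genl_register_family(&demo_gnl_family);
}

static void __exit demo_exit(void)
{
	genl_unregister_family(&demo_gnl_family);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");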
Example #9
static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
				struct l2tp_session *session)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;
	struct l2tp_stats stats;
	unsigned int start;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
	if (!hdr)
		return -EMSGSIZE;	/* genlmsg_put() returns NULL, not an ERR_PTR, on failure */

	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
			session->peer_session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
	    nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
	    (session->mru &&
	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
		goto nla_put_failure;

	if ((session->ifname && session->ifname[0] &&
	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
	    (session->cookie_len &&
	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
		     &session->cookie[0])) ||
	    (session->peer_cookie_len &&
	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
		     &session->peer_cookie[0])) ||
	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
#ifdef CONFIG_XFRM
	    (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
#endif
	    (session->reorder_timeout &&
	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	do {
		start = u64_stats_fetch_begin(&session->stats.syncp);
		stats.tx_packets = session->stats.tx_packets;
		stats.tx_bytes = session->stats.tx_bytes;
		stats.tx_errors = session->stats.tx_errors;
		stats.rx_packets = session->stats.rx_packets;
		stats.rx_bytes = session->stats.rx_bytes;
		stats.rx_errors = session->stats.rx_errors;
		stats.rx_seq_discards = session->stats.rx_seq_discards;
		stats.rx_oos_packets = session->stats.rx_oos_packets;
	} while (u64_stats_fetch_retry(&session->stats.syncp, start));

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
			stats.rx_seq_discards) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
			stats.rx_oos_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return genlmsg_end(skb, hdr);

 nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
Example #10
void nft_trace_notify(struct nft_traceinfo *info)
{
	const struct nft_pktinfo *pkt = info->pkt;
	struct nfgenmsg *nfmsg;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int size;
	int event = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_TRACE;

	if (!nfnetlink_has_listeners(pkt->net, NFNLGRP_NFTRACE))
		return;

	size = nlmsg_total_size(sizeof(struct nfgenmsg)) +
		nla_total_size(NFT_TABLE_MAXNAMELEN) +
		nla_total_size(NFT_CHAIN_MAXNAMELEN) +
		nla_total_size_64bit(sizeof(__be64)) +	/* rule handle */
		nla_total_size(sizeof(__be32)) +	/* trace type */
		nla_total_size(0) +			/* VERDICT, nested */
			nla_total_size(sizeof(u32)) +	/* verdict code */
			nla_total_size(NFT_CHAIN_MAXNAMELEN) + /* jump target */
		nla_total_size(sizeof(u32)) +		/* id */
		nla_total_size(NFT_TRACETYPE_LL_HSIZE) +
		nla_total_size(NFT_TRACETYPE_NETWORK_HSIZE) +
		nla_total_size(NFT_TRACETYPE_TRANSPORT_HSIZE) +
		nla_total_size(sizeof(u32)) +		/* iif */
		nla_total_size(sizeof(__be16)) +	/* iiftype */
		nla_total_size(sizeof(u32)) +		/* oif */
		nla_total_size(sizeof(__be16)) +	/* oiftype */
		nla_total_size(sizeof(u32)) +		/* mark */
		nla_total_size(sizeof(u32)) +		/* nfproto */
		nla_total_size(sizeof(u32));		/* policy */

	skb = nlmsg_new(size, GFP_ATOMIC);
	if (!skb)
		return;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct nfgenmsg), 0);
	if (!nlh)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family	= info->basechain->type->family;
	nfmsg->version		= NFNETLINK_V0;
	nfmsg->res_id		= 0;

	if (nla_put_be32(skb, NFTA_TRACE_NFPROTO, htonl(pkt->pf)))
		goto nla_put_failure;

	if (nla_put_be32(skb, NFTA_TRACE_TYPE, htonl(info->type)))
		goto nla_put_failure;

	if (trace_fill_id(skb, pkt->skb))
		goto nla_put_failure;

	if (info->chain) {
		if (nla_put_string(skb, NFTA_TRACE_CHAIN,
				   info->chain->name))
			goto nla_put_failure;
		if (nla_put_string(skb, NFTA_TRACE_TABLE,
				   info->chain->table->name))
			goto nla_put_failure;
	}

	if (nf_trace_fill_rule_info(skb, info))
		goto nla_put_failure;

	switch (info->type) {
	case NFT_TRACETYPE_UNSPEC:
	case __NFT_TRACETYPE_MAX:
		break;
	case NFT_TRACETYPE_RETURN:
	case NFT_TRACETYPE_RULE:
		if (nft_verdict_dump(skb, NFTA_TRACE_VERDICT, info->verdict))
			goto nla_put_failure;
		break;
	case NFT_TRACETYPE_POLICY:
		if (nla_put_be32(skb, NFTA_TRACE_POLICY,
				 info->basechain->policy))
			goto nla_put_failure;
		break;
	}

	if (pkt->skb->mark &&
	    nla_put_be32(skb, NFTA_TRACE_MARK, htonl(pkt->skb->mark)))
		goto nla_put_failure;

	if (!info->packet_dumped) {
		if (nf_trace_fill_dev_info(skb, pkt->in, pkt->out))
			goto nla_put_failure;

		if (nf_trace_fill_pkt_info(skb, pkt))
			goto nla_put_failure;
		info->packet_dumped = true;
	}

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, pkt->net, 0, NFNLGRP_NFTRACE, 0, GFP_ATOMIC);
	return;

 nla_put_failure:
	WARN_ON_ONCE(1);
	kfree_skb(skb);
}
Example #11
File: act_simple.c  Project: AK101111/linux
static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_defact *d = to_defact(a);
	struct tc_defact opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
	    nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_simp_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, simp_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, simp_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_simp_ops = {
	.kind		=	"simple",
	.type		=	TCA_ACT_SIMP,
	.owner		=	THIS_MODULE,
	.act		=	tcf_simp,
	.dump		=	tcf_simp_dump,
	.cleanup	=	tcf_simp_release,
	.init		=	tcf_simp_init,
	.walk		=	tcf_simp_walker,
	.lookup		=	tcf_simp_search,
	.size		=	sizeof(struct tcf_defact),
};

static __net_init int simp_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, simp_net_id);

	return tc_action_net_init(tn, &act_simp_ops, SIMP_TAB_MASK);
}

static void __net_exit simp_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, simp_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations simp_net_ops = {
	.init = simp_init_net,
	.exit = simp_exit_net,
	.id   = &simp_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2005)");
MODULE_DESCRIPTION("Simple example action");
MODULE_LICENSE("GPL");

static int __init simp_init_module(void)
{
	int ret = tcf_register_action(&act_simp_ops, &simp_net_ops);
	if (!ret)
		pr_info("Simple TC action Loaded\n");
	return ret;
}

static void __exit simp_cleanup_module(void)
{
	tcf_unregister_action(&act_simp_ops, &simp_net_ops);
}

module_init(simp_init_module);
module_exit(simp_cleanup_module);