Example #1
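/*
 * Genetlink dump callback: parse one IPVS_CMD_ATTR_SERVICE nested attribute
 * into the next free slot of get->entrytable[], then grow the table by one
 * entry so the next message has room.
 */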
static int ipvs_services_parse_cb(struct nl_msg *msg, void *arg)
{
	struct nlmsghdr *nlh = nlmsg_hdr(msg);
	struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
	struct nlattr *svc_attrs[IPVS_SVC_ATTR_MAX + 1];
	struct ip_vs_get_services **getp = (struct ip_vs_get_services **)arg;
	struct ip_vs_get_services *get = (struct ip_vs_get_services *)*getp;
	struct ip_vs_flags flags;
	int i = get->num_services;

	if (genlmsg_parse(nlh, 0, attrs, IPVS_CMD_ATTR_MAX, ipvs_cmd_policy) != 0)
		return -1;

	if (!attrs[IPVS_CMD_ATTR_SERVICE])
		return -1;

	if (nla_parse_nested(svc_attrs, IPVS_SVC_ATTR_MAX, attrs[IPVS_CMD_ATTR_SERVICE], ipvs_service_policy))
		return -1;

	memset(&(get->entrytable[i]), 0, sizeof(get->entrytable[i]));

	if (!(svc_attrs[IPVS_SVC_ATTR_AF] &&
	      (svc_attrs[IPVS_SVC_ATTR_FWMARK] ||
	       (svc_attrs[IPVS_SVC_ATTR_PROTOCOL] &&
		svc_attrs[IPVS_SVC_ATTR_ADDR] &&
		svc_attrs[IPVS_SVC_ATTR_PORT])) &&
	      svc_attrs[IPVS_SVC_ATTR_SCHED_NAME] &&
	      svc_attrs[IPVS_SVC_ATTR_NETMASK] &&
	      svc_attrs[IPVS_SVC_ATTR_TIMEOUT] &&
	      svc_attrs[IPVS_SVC_ATTR_FLAGS]))
		return -1;

	get->entrytable[i].af = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_AF]);

	if (svc_attrs[IPVS_SVC_ATTR_FWMARK])
		get->entrytable[i].fwmark = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_FWMARK]);
	else {
		get->entrytable[i].protocol = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_PROTOCOL]);
		memcpy(&(get->entrytable[i].addr), nla_data(svc_attrs[IPVS_SVC_ATTR_ADDR]),
		       sizeof(get->entrytable[i].addr));
		get->entrytable[i].port = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_PORT]);
	}

	strncpy(get->entrytable[i].sched_name,
		nla_get_string(svc_attrs[IPVS_SVC_ATTR_SCHED_NAME]),
		IP_VS_SCHEDNAME_MAXLEN);

	get->entrytable[i].netmask = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_NETMASK]);
	get->entrytable[i].timeout = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_TIMEOUT]);
	nla_memcpy(&flags, svc_attrs[IPVS_SVC_ATTR_FLAGS], sizeof(flags));
	get->entrytable[i].flags = flags.flags & flags.mask;

	if (ipvs_parse_stats(&(get->entrytable[i].stats),
			     svc_attrs[IPVS_SVC_ATTR_STATS]) != 0)
		return -1;

	get->entrytable[i].num_dests = 0;

	i++;

	get->num_services = i;
	get = realloc(get, sizeof(*get)
	      + sizeof(ipvs_service_entry_t) * (get->num_services + 1));
	*getp = get;
	return 0;
}
Example #2
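/*
 * Create a conntrack context: open a NETLINK_NETFILTER socket subscribed to
 * the given multicast groups and register it as a ginkgo source wired to the
 * nl_parse/nfct_response/nfct_handler callbacks.
 */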
nfct_t *nfct_create(ginkgo_ctx *ctx, int grps, nfct_notify cb, void *ud)
{
    nfct_t *ct;
    char name[20];
    ginkgo_src src = {
        .name = name,
        .fd = -1,
        .rd = NULL,
        .wr = NULL,
        .pars = nl_parse,
        .resp = nfct_response,
        .hand = nfct_handler,
        .ud = NULL,
    };
    int fd;

    if(! ctx) return NULL;
    if((fd = nl_open(NETLINK_NETFILTER, grps)) < 0)
        return NULL;
    if(! instantiate(ct))  {
        close(fd);
        return NULL;
    }
    ct->ginkgo = ctx;
    ct->nlfd = fd;
    ct->nlgrps = grps;
    ct->nlseq = 0;
    ct->cb = cb;
    ct->ud = ud;

    src.fd = fd;
    src.ud = ct;
    sprintf(name, "nfct-%d", fd);
    if(ginkgo_src_register(ctx, &src, &ct->id, 0) < 0)  {
        close(fd);
        free(ct);
        return NULL;
    }
    return ct;
}

void nfct_destroy(nfct_t *ct)
{
    if(ct)  {
        ginkgo_src_deregister(ct->ginkgo, ct->id, 1, 1);
        close(ct->nlfd);
        free(ct);
    }
}

int nfct_conn_get_counter(const conn_entry *e, conn_counter *counter)
{
    const struct nlattr *orig = e->nla[CTA_COUNTERS_ORIG];
    const struct nlattr *rep = e->nla[CTA_COUNTERS_REPLY];
    struct nlattr *arr[CTA_COUNTERS_MAX + 1];

    if(orig && rep)  {
        nla_parse_nested(arr, CTA_COUNTERS_MAX, orig);
        if(! arr[CTA_COUNTERS_PACKETS] || ! arr[CTA_COUNTERS_BYTES])
            return -1;
        counter->orig_pkts = be64toh(nla_get_be64(arr[CTA_COUNTERS_PACKETS]));
        counter->orig_bytes = be64toh(nla_get_be64(arr[CTA_COUNTERS_BYTES]));

        nla_parse_nested(arr, CTA_COUNTERS_MAX, rep);
        if(! arr[CTA_COUNTERS_PACKETS] || ! arr[CTA_COUNTERS_BYTES])
            return -1;
        counter->rep_pkts = be64toh(nla_get_be64(arr[CTA_COUNTERS_PACKETS]));
        counter->rep_bytes = be64toh(nla_get_be64(arr[CTA_COUNTERS_BYTES]));
        return 0;
    }
    return -1;
}

int nfct_conn_get_tcpinfo(const conn_entry *e, conn_tcpinfo *info)
{
    const struct nlattr *proto = e->nla[CTA_PROTOINFO];
    const struct nlattr *tcp;
    struct nlattr *arr[CTA_PROTOINFO_TCP_MAX + 1];

    if(proto)  {
        tcp = (const struct nlattr *)nla_data(proto);
        if(nla_type(tcp) != CTA_PROTOINFO_TCP)
            return -1;
        nla_parse_nested(arr, CTA_PROTOINFO_TCP_MAX, tcp);
        if(arr[CTA_PROTOINFO_TCP_STATE]
           && arr[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]
           && arr[CTA_PROTOINFO_TCP_WSCALE_REPLY]
           && arr[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]
           && arr[CTA_PROTOINFO_TCP_FLAGS_REPLY])  {
            info->state = nla_get_u8(arr[CTA_PROTOINFO_TCP_STATE]);
            info->wscale_orig = nla_get_u8(arr[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
            info->wscale_rep = nla_get_u8(arr[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
            nla_get_mem(arr[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL], &info->flags_orig, sizeof(info->flags_orig));
            nla_get_mem(arr[CTA_PROTOINFO_TCP_FLAGS_REPLY], &info->flags_rep, sizeof(info->flags_rep));
            return 0;
        }
    }
    return -1;
}

int nfct_conn_get_sctpinfo(const conn_entry *e, conn_sctpinfo *info)
{
    const struct nlattr *proto = e->nla[CTA_PROTOINFO];
    const struct nlattr *sctp;
    struct nlattr *arr[CTA_PROTOINFO_SCTP_MAX + 1];

    if(proto)  {
        sctp = (const struct nlattr *)nla_data(proto);
        if(nla_type(sctp) != CTA_PROTOINFO_SCTP)
            return -1;
        nla_parse_nested(arr, CTA_PROTOINFO_SCTP_MAX, sctp);
        if(arr[CTA_PROTOINFO_SCTP_STATE]
           && arr[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]
           && arr[CTA_PROTOINFO_SCTP_VTAG_REPLY])  {
            info->state = nla_get_u8(arr[CTA_PROTOINFO_SCTP_STATE]);
            info->vtag_orig = ntohl(nla_get_be32(arr[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]));
            info->vtag_rep = ntohl(nla_get_be32(arr[CTA_PROTOINFO_SCTP_VTAG_REPLY]));
            return 0;
        }
    }
    return -1;
}

int __nfct_conn_get_tuple(const conn_entry *e, int type, conn_tuple *tuple)
{
    struct nlattr *arr[CTA_TUPLE_MAX + 1];
    struct nlattr *ip[CTA_IP_MAX + 1];
    struct nlattr *proto[CTA_PROTO_MAX + 1];

    if(! e->nla[type])
        return -1;

    memset(tuple, 0, sizeof(*tuple));
    nla_parse_nested(arr, CTA_TUPLE_MAX, e->nla[type]);
    if(! arr[CTA_TUPLE_IP] || ! arr[CTA_TUPLE_PROTO])  {
        PR_WARN("unexpected NULL tuple IP or proto");
        return -1;
    }

    nla_parse_nested(ip, CTA_IP_MAX, arr[CTA_TUPLE_IP]);
    tuple->src.l3num = e->l3num;
    if(e->l3num == AF_INET)  {
        if(! ip[CTA_IP_V4_SRC] || ! ip[CTA_IP_V4_DST])
            return -1;
        tuple->src.u3.ip = nla_get_be32(ip[CTA_IP_V4_SRC]);
        tuple->dst.u3.ip = nla_get_be32(ip[CTA_IP_V4_DST]);
    }else if(e->l3num == AF_INET6)  {
        if(! ip[CTA_IP_V6_SRC] || ! ip[CTA_IP_V6_DST])
            return -1;
        nla_get_mem(ip[CTA_IP_V6_SRC], &tuple->src.u3.in6, sizeof(tuple->src.u3.in6));
        nla_get_mem(ip[CTA_IP_V6_DST], &tuple->dst.u3.in6, sizeof(tuple->dst.u3.in6));
    }else  {
        PR_WARN("unexpected l3num while parsing tuple:%d", e->l3num);
        return -1;
    }

    nla_parse_nested(proto, CTA_PROTO_MAX, arr[CTA_TUPLE_PROTO]);
    if(! proto[CTA_PROTO_NUM])  {
        PR_WARN("unexpected NULL proto num while parsing tuple");
        return -1;
    }
    tuple->dst.protonum = nla_get_u8(proto[CTA_PROTO_NUM]);
    switch(tuple->dst.protonum)  {
    case IPPROTO_TCP:
    case IPPROTO_UDP:
    case IPPROTO_UDPLITE:
        if(! proto[CTA_PROTO_SRC_PORT] || ! proto[CTA_PROTO_DST_PORT])  {
            PR_WARN("unexpected NULL port while parsing tuple");
            return -1;
        }
        tuple->src.u.tcp.port = nla_get_be16(proto[CTA_PROTO_SRC_PORT]);
        tuple->dst.u.tcp.port = nla_get_be16(proto[CTA_PROTO_DST_PORT]);
        break;
    case IPPROTO_ICMP:
        if(! proto[CTA_PROTO_ICMP_ID] || ! proto[CTA_PROTO_ICMP_TYPE] || ! proto[CTA_PROTO_ICMP_CODE])  {
            PR_WARN("unexpected NULL icmp attr while parsing tuple");
            return -1;
        }
        tuple->src.u.icmp.id = nla_get_be16(proto[CTA_PROTO_ICMP_ID]);
        tuple->dst.u.icmp.type = nla_get_u8(proto[CTA_PROTO_ICMP_TYPE]);
        tuple->dst.u.icmp.code = nla_get_u8(proto[CTA_PROTO_ICMP_CODE]);
        break;
    case IPPROTO_ICMPV6:
        if(! proto[CTA_PROTO_ICMPV6_ID]
           || ! proto[CTA_PROTO_ICMPV6_TYPE]
           || ! proto[CTA_PROTO_ICMPV6_CODE])  {
            PR_WARN("unexpected NULL icmpv6 attr while parsing tuple");
            return -1;
        }
        tuple->src.u.icmp.id = nla_get_be16(proto[CTA_PROTO_ICMPV6_ID]);
        tuple->dst.u.icmp.type = nla_get_u8(proto[CTA_PROTO_ICMPV6_TYPE]);
        tuple->dst.u.icmp.code = nla_get_u8(proto[CTA_PROTO_ICMPV6_CODE]);
        break;
    default:
        PR_INFO("unsupported proto %d while parsing tuple", tuple->dst.protonum);
        return -1;
    }
    return 0;
}

/* fixme: nat seq adj */

nfct_msg *nfct_msg_new(__u8 l3num, int flags)
{
    ginkgo_msg *msg;
    struct nlmsghdr *nlh;
    struct nfgenmsg *nfmsg;
    nfct_msg_ctl *ctl;
    conn_entry *e;

    if((msg = ginkgo_new_msg(0, NFCT_MSG_GOOD_SIZE)))  {
        if(! instantiate(e))  {
            free(msg);
            return NULL;
        }

        nlh = NL_HEADER(msg);
        BZERO(nlh);
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        if(flags & NFCT_F_CREATE)
            nlh->nlmsg_flags |= NLM_F_CREATE;
        if(flags & NFCT_F_EXCL)
            nlh->nlmsg_flags |= NLM_F_EXCL;
        if(flags & NFCT_F_DUMP)
            nlh->nlmsg_flags |= NLM_F_DUMP;

        nfmsg = (struct nfgenmsg *)NLMSG_DATA(nlh);
        nfmsg->nfgen_family = l3num;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = 0;

        BZERO(e);
        e->l3num = l3num;

        ctl = msg_ctl(msg);
        BZERO(ctl);
        ctl->base.entry = e;
        ctl->ctx = (char *)nfmsg + NLMSG_ALIGN(sizeof(struct nfgenmsg));
        ctl->sz = NFCT_MSG_GOOD_SIZE;
        return (nfct_msg *)ctl;
    }
    return NULL;
}
Example #3
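/*
 * Initialise (or update) an act_tunnel_key action from its netlink
 * attributes: for TCA_TUNNEL_KEY_ACT_SET, build the tunnel metadata dst
 * (IPv4 or IPv6 endpoints, key id, dst port, TOS/TTL, options) and swap it
 * into the action's RCU-protected parameters.
 */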
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	__be64 key_id = 0;
	int opts_len = 0;
	__be16 flags = 0;
	u8 tos, ttl;
	int ret = 0;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy,
			       extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			__be32 key32;

			key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
			key_id = key32_to_tunnel_id(key32);
			flags = TUNNEL_KEY;
		}

		flags |= TUNNEL_CSUM;
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			flags &= ~TUNNEL_CSUM;

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
						      0, flags,
						      key_id, 0);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto release_tun_meta;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_tunnel_key_ops, bind, true);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto release_tun_meta;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		ret = -EEXIST;
		goto release_tun_meta;
	}

	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		ret = -ENOMEM;
		exists = true;
		goto release_tun_meta;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	spin_lock_bh(&t->tcf_lock);
	t->tcf_action = parm->action;
	rcu_swap_protected(t->params, params_new,
			   lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

release_tun_meta:
	dst_release(&metadata->dst);

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, parm->index);
	return ret;
}
Example #4
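/*
 * RTM_SETLINK handler: look up the device by ifindex or name and apply the
 * requested changes (device map, MAC address, MTU, name, flags, queue
 * length, operstate, link mode), sending an address-change notification if
 * needed.
 */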
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err, send_addr_notify = 0, modified = 0;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		goto errout;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index >= 0)
		dev = dev_get_by_index(ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = dev_get_by_name(ifname);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	if (tb[IFLA_ADDRESS] &&
	    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
		goto errout_dev;

	if (tb[IFLA_BROADCAST] &&
	    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
		goto errout_dev;

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!dev->set_config) {
			err = -EOPNOTSUPP;
			goto errout_dev;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout_dev;
		}

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = dev->set_config(dev, &k_map);
		if (err < 0)
			goto errout_dev;

		modified = 1;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		if (!dev->set_mac_address) {
			err = -EOPNOTSUPP;
			goto errout_dev;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout_dev;
		}

		len = sizeof(sa_family_t) + dev->addr_len;
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout_dev;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = dev->set_mac_address(dev, sa);
		kfree(sa);
		if (err)
			goto errout_dev;
		send_addr_notify = 1;
		modified = 1;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err < 0)
			goto errout_dev;
		modified = 1;
	}

	/*
	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	 * requested.
	 */
	if (ifm->ifi_index >= 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout_dev;
		modified = 1;
	}

#ifdef CONFIG_NET_WIRELESS_RTNETLINK
	if (tb[IFLA_WIRELESS]) {
		/* Call Wireless Extensions.
		 * Various stuff checked in there... */
		err = wireless_rtnetlink_set(dev, nla_data(tb[IFLA_WIRELESS]),
					     nla_len(tb[IFLA_WIRELESS]));
		if (err < 0)
			goto errout_dev;
	}
#endif	/* CONFIG_NET_WIRELESS_RTNETLINK */

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		send_addr_notify = 1;
	}


	if (ifm->ifi_flags)
		dev_change_flags(dev, ifm->ifi_flags);

	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);

	if (tb[IFLA_WEIGHT])
		dev->weight = nla_get_u32(tb[IFLA_WEIGHT]);

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		write_lock_bh(&dev_base_lock);
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
		write_unlock_bh(&dev_base_lock);
	}

	err = 0;

errout_dev:
	if (err < 0 && modified && net_ratelimit())
		printk(KERN_WARNING "A link change request failed with "
		       "some changes comitted already. Interface %s may "
		       "have been left with an inconsistent configuration, "
		       "please check.\n", dev->name);

	if (send_addr_notify)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);

	dev_put(dev);
errout:
	return err;
}
Example #5
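/*
 * Generic netlink transaction handler: hand the NL_ATTR_VR_MESSAGE_PROTOCOL
 * payload to vr_message_request() and unicast each queued response back to
 * the requester, terminating multipart replies with NLMSG_DONE.
 */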
static int
netlink_trans_request(struct sk_buff *in_skb, struct genl_info *info)
{
    char *buf;
    unsigned int len;
    uint32_t multi_flag;
    struct nlmsghdr *rep, *nlh = info->nlhdr;
    struct genlmsghdr *genlh;
    struct nlattr **aap = info->attrs;
    struct nlattr *nla;
    struct vr_message request, *response;
    struct sk_buff *skb;
    uint32_t netlink_id;

    if (!aap || !(nla = aap[NL_ATTR_VR_MESSAGE_PROTOCOL]))
        return -EINVAL;

    request.vr_message_buf = nla_data(nla);
    request.vr_message_len = nla_len(nla);

    vr_message_request(&request);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
    netlink_id =  NETLINK_CB(in_skb).pid;
#else
    netlink_id =  NETLINK_CB(in_skb).portid;
#endif

    multi_flag = 0;
    while ((response = vr_message_dequeue_response())) {
        if ((multi_flag == 0) && (!vr_response_queue_empty()))
            multi_flag = NLM_F_MULTI;

        buf = response->vr_message_buf;
        skb = netlink_skb(buf);
        if (!skb)
            continue;

        len = response->vr_message_len;
        len += GENL_HDRLEN + NLA_HDRLEN;
        len = NLMSG_ALIGN(len);
        rep = __nlmsg_put(skb, netlink_id, nlh->nlmsg_seq,
                        nlh->nlmsg_type, len, multi_flag);
        genlh = nlmsg_data(rep);
        memcpy(genlh, info->genlhdr, sizeof(*genlh));

        nla = (struct nlattr *)((char *)genlh + GENL_HDRLEN);
        nla->nla_len = response->vr_message_len;
        nla->nla_type = NL_ATTR_VR_MESSAGE_PROTOCOL;

        netlink_unicast(in_skb->sk, skb, netlink_id, MSG_DONTWAIT);

        response->vr_message_buf = NULL;
        vr_message_free(response);
    }

    if (multi_flag) {
        skb = alloc_skb(NLMSG_HDRLEN, GFP_ATOMIC);
        if (!skb)
            return 0;

        __nlmsg_put(skb, netlink_id, nlh->nlmsg_seq, NLMSG_DONE, 0, 0);
        netlink_unicast(in_skb->sk, skb, netlink_id, MSG_DONTWAIT);
    }


    return 0;
}
Example #6
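/*
 * L2TP_CMD_TUNNEL_CREATE handler: collect tunnel id, peer id, protocol
 * version and encapsulation settings from the genetlink attributes and
 * create the tunnel unless one with the same id already exists.
 */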
static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int proto_version;
	int fd;
	int ret = 0;
	struct l2tp_tunnel_cfg cfg = { 0, };
	struct l2tp_tunnel *tunnel;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
		ret = -EINVAL;
		goto out;
	}
	proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);

	if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);

	fd = -1;
	if (info->attrs[L2TP_ATTR_FD]) {
		fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
		    info->attrs[L2TP_ATTR_IP6_DADDR]) {
			cfg.local_ip6 = nla_data(
				info->attrs[L2TP_ATTR_IP6_SADDR]);
			cfg.peer_ip6 = nla_data(
				info->attrs[L2TP_ATTR_IP6_DADDR]);
		} else
#endif
		if (info->attrs[L2TP_ATTR_IP_SADDR] &&
		    info->attrs[L2TP_ATTR_IP_DADDR]) {
			cfg.local_ip.s_addr = nla_get_be32(
				info->attrs[L2TP_ATTR_IP_SADDR]);
			cfg.peer_ip.s_addr = nla_get_be32(
				info->attrs[L2TP_ATTR_IP_DADDR]);
		} else {
			ret = -EINVAL;
			goto out;
		}
		if (info->attrs[L2TP_ATTR_UDP_SPORT])
			cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
		if (info->attrs[L2TP_ATTR_UDP_DPORT])
			cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
		if (info->attrs[L2TP_ATTR_UDP_CSUM])
			cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel != NULL) {
		ret = -EEXIST;
		goto out;
	}

	ret = -EINVAL;
	switch (cfg.encap) {
	case L2TP_ENCAPTYPE_UDP:
	case L2TP_ENCAPTYPE_IP:
		ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
					 peer_tunnel_id, &cfg, &tunnel);
		break;
	}

out:
	return ret;
}
Example #7
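/*
 * Configure a TBF qdisc: precompute the rate (and optional peak rate),
 * derive the maximum packet size from buffer/mtu, attach a bfifo child when
 * a limit is given and commit the new parameters under the qdisc tree lock.
 */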
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
						      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	if (qopt->peakrate.rate) {
		memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
Example #8
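/*
 * rtnetlink address message parser: fill a struct rtnl_addr from the
 * ifaddrmsg header and IFA_* attributes (label, cacheinfo, local/peer,
 * broadcast, anycast, multicast) and hand it to the cache parser callback.
 */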
static int addr_msg_parser(struct nl_cache_ops *ops, struct sockaddr_nl *who,
			   struct nlmsghdr *nlh, struct nl_parser_param *pp)
{
	struct rtnl_addr *addr;
	struct ifaddrmsg *ifa;
	struct nlattr *tb[IFA_MAX+1];
	int err = -ENOMEM, peer_prefix = 0;

	addr = rtnl_addr_alloc();
	if (!addr) {
		err = nl_errno(ENOMEM);
		goto errout;
	}
	addr->ce_msgtype = nlh->nlmsg_type;

	err = nlmsg_parse(nlh, sizeof(*ifa), tb, IFA_MAX, addr_policy);
	if (err < 0)
		goto errout_free;

	ifa = nlmsg_data(nlh);
	addr->a_family = ifa->ifa_family;
	addr->a_prefixlen = ifa->ifa_prefixlen;
	addr->a_flags = ifa->ifa_flags;
	addr->a_scope = ifa->ifa_scope;
	addr->a_ifindex = ifa->ifa_index;

	addr->ce_mask = (ADDR_ATTR_FAMILY | ADDR_ATTR_PREFIXLEN |
			 ADDR_ATTR_FLAGS | ADDR_ATTR_SCOPE | ADDR_ATTR_IFINDEX);

	if (tb[IFA_LABEL]) {
		nla_strlcpy(addr->a_label, tb[IFA_LABEL], IFNAMSIZ);
		addr->ce_mask |= ADDR_ATTR_LABEL;
	}

	if (tb[IFA_CACHEINFO]) {
		struct ifa_cacheinfo *ca;
		
		ca = nla_data(tb[IFA_CACHEINFO]);
		addr->a_cacheinfo.aci_prefered = ca->ifa_prefered;
		addr->a_cacheinfo.aci_valid = ca->ifa_valid;
		addr->a_cacheinfo.aci_cstamp = ca->cstamp;
		addr->a_cacheinfo.aci_tstamp = ca->tstamp;
		addr->ce_mask |= ADDR_ATTR_CACHEINFO;
	}

	if (tb[IFA_LOCAL]) {
		addr->a_local = nla_get_addr(tb[IFA_LOCAL], addr->a_family);
		if (!addr->a_local)
			goto errout_free;
		addr->ce_mask |= ADDR_ATTR_LOCAL;
	}

	if (tb[IFA_ADDRESS]) {
		struct nl_addr *a;

		a = nla_get_addr(tb[IFA_ADDRESS], addr->a_family);
		if (!a)
			goto errout_free;

		/* IPv6 sends the local address as IFA_ADDRESS with
		 * no IFA_LOCAL, IPv4 sends both IFA_LOCAL and IFA_ADDRESS
		 * with IFA_ADDRESS being the peer address if they differ */
		if (!tb[IFA_LOCAL] || !nl_addr_cmp(a, addr->a_local)) {
			nl_addr_put(addr->a_local);
			addr->a_local = a;
			addr->ce_mask |= ADDR_ATTR_LOCAL;
		} else {
			addr->a_peer = a;
			addr->ce_mask |= ADDR_ATTR_PEER;
			peer_prefix = 1;
		}
	}

	nl_addr_set_prefixlen(peer_prefix ? addr->a_peer : addr->a_local,
			      addr->a_prefixlen);

	if (tb[IFA_BROADCAST]) {
		addr->a_bcast = nla_get_addr(tb[IFA_BROADCAST], addr->a_family);
		if (!addr->a_bcast)
			goto errout_free;

		addr->ce_mask |= ADDR_ATTR_BROADCAST;
	}

	if (tb[IFA_ANYCAST]) {
		addr->a_anycast = nla_get_addr(tb[IFA_ANYCAST], addr->a_family);
		if (!addr->a_anycast)
			goto errout_free;

		addr->ce_mask |= ADDR_ATTR_ANYCAST;
	}

	if (tb[IFA_MULTICAST]) {
		addr->a_multicast = nla_get_addr(tb[IFA_MULTICAST],
						 addr->a_family);
		if (!addr->a_multicast)
			goto errout_free;

		addr->ce_mask |= ADDR_ATTR_MULTICAST;
	}

	err = pp->pp_cb((struct nl_object *) addr, pp);
	if (err < 0)
		goto errout_free;

	err = P_ACCEPT;

errout_free:
	rtnl_addr_put(addr);
errout:
	return err;
}
Example #9
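/*
 * veth newlink handler: parse the optional VETH_INFO_PEER attributes,
 * create and register the peer device first, then register dev itself and
 * cross-link the two through their private data.
 */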
static int veth_newlink(struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;

	/*
	 * create and register peer first
	 *
	 * struct ifinfomsg is at the head of VETH_INFO_PEER, but we
	 * skip it since no info from it is useful yet
	 */

	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		err = nla_parse(peer_tb, IFLA_MAX,
				nla_data(nla_peer) + sizeof(struct ifinfomsg),
				nla_len(nla_peer) - sizeof(struct ifinfomsg),
				ifla_policy);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else
		tbp = tb;

	if (tbp[IFLA_IFNAME])
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");

	peer = rtnl_create_link(dev_net(dev), ifname, &veth_link_ops, tbp);
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	if (tbp[IFLA_ADDRESS] == NULL)
		random_ether_addr(peer->dev_addr);

	err = register_netdevice(peer);
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	/*
	 * register dev last
	 *
	 * note that, since we've registered a new device, the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		random_ether_addr(dev->dev_addr);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto err_alloc_name;
	}

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	priv->peer = peer;

	priv = netdev_priv(peer);
	priv->peer = dev;
	return 0;

err_register_dev:
	/* nothing to do */
err_alloc_name:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}
Example #10
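/*
 * NFC get-targets dump callback: extract target index, supported protocols,
 * SENS_RES/SEL_RES, NFCID1 and ISO15693 attributes and report the target to
 * the adapter layer.
 */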
static int get_targets_handler(struct nl_msg *n, void *arg)
{
	struct nlmsghdr *nlh = nlmsg_hdr(n);
	struct nlattr *attrs[NFC_ATTR_MAX + 1];
	uint32_t adapter_idx, target_idx, protocols;
	uint16_t sens_res = 0;
	uint8_t sel_res = 0;
	uint8_t nfcid[NFC_MAX_NFCID1_LEN], nfcid_len;
	uint8_t iso15693_dsfid = 0;
	uint8_t iso15693_uid_len, iso15693_uid[NFC_MAX_ISO15693_UID_LEN];

	DBG("");

	genlmsg_parse(nlh, 0, attrs, NFC_ATTR_MAX, NULL);

	adapter_idx = *((uint32_t *)arg);
	target_idx = nla_get_u32(attrs[NFC_ATTR_TARGET_INDEX]);
	protocols = nla_get_u32(attrs[NFC_ATTR_PROTOCOLS]);

	if (attrs[NFC_ATTR_TARGET_SENS_RES])
		sens_res =
			nla_get_u16(attrs[NFC_ATTR_TARGET_SENS_RES]);

	if (attrs[NFC_ATTR_TARGET_SEL_RES])
		sel_res =
			nla_get_u8(attrs[NFC_ATTR_TARGET_SEL_RES]);

	if (attrs[NFC_ATTR_TARGET_NFCID1]) {
		nfcid_len = nla_len(attrs[NFC_ATTR_TARGET_NFCID1]);
		if (nfcid_len <= NFC_MAX_NFCID1_LEN)
			memcpy(nfcid, nla_data(attrs[NFC_ATTR_TARGET_NFCID1]),
								nfcid_len);
	} else {
		nfcid_len = 0;
	}

	if (attrs[NFC_ATTR_TARGET_ISO15693_DSFID])
		iso15693_dsfid =
			nla_get_u8(attrs[NFC_ATTR_TARGET_ISO15693_DSFID]);

	if (attrs[NFC_ATTR_TARGET_ISO15693_UID]) {
		iso15693_uid_len = nla_len(attrs[NFC_ATTR_TARGET_ISO15693_UID]);
		if (iso15693_uid_len == NFC_MAX_ISO15693_UID_LEN)
			memcpy(iso15693_uid,
			       nla_data(attrs[NFC_ATTR_TARGET_ISO15693_UID]),
					NFC_MAX_ISO15693_UID_LEN);
	} else {
		iso15693_uid_len = 0;
	}

	DBG("target idx %d proto 0x%x sens_res 0x%x sel_res 0x%x NFCID len %d",
	    target_idx, protocols, sens_res, sel_res, nfcid_len);
	DBG("\tiso15693_uid_len %d", iso15693_uid_len);

	__near_adapter_add_target(adapter_idx, target_idx, protocols,
				  sens_res, sel_res, nfcid, nfcid_len,
				  iso15693_dsfid,
				  iso15693_uid_len, iso15693_uid);

	return 0;
}
Example #11
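/* Read an IEEE 802.15.4 extended hardware address attribute as a __le64. */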
static __le64 nla_get_hwaddr(const struct nlattr *nla)
{
	return ieee802154_devaddr_from_raw(nla_data(nla));
}
Example #12
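/*
 * Initialise (or update) a pedit action: validate TCA_PEDIT_PARMS and its
 * trailing key array, create or look up the action by index and copy the
 * keys in under the action lock.
 */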
static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
			  struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_PEDIT_MAX + 1];
	struct tc_pedit *parm;
	int ret = 0, err;
	struct tcf_pedit *p;
	struct tcf_common *pc;
	struct tc_pedit_key *keys = NULL;
	int ksize;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy);
	if (err < 0)
		return err;

	if (tb[TCA_PEDIT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_PEDIT_PARMS]);
	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
	if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
		return -EINVAL;

	pc = tcf_hash_check(parm->index, a, bind, &pedit_hash_info);
	if (!pc) {
		if (!parm->nkeys)
			return -EINVAL;
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &pedit_idx_gen, &pedit_hash_info);
		if (IS_ERR(pc))
		    return PTR_ERR(pc);
		p = to_pedit(pc);
		keys = kmalloc(ksize, GFP_KERNEL);
		if (keys == NULL) {
			kfree(pc);
			return -ENOMEM;
		}
		ret = ACT_P_CREATED;
	} else {
		p = to_pedit(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &pedit_hash_info);
			return -EEXIST;
		}
		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
			keys = kmalloc(ksize, GFP_KERNEL);
			if (keys == NULL)
				return -ENOMEM;
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->tcfp_flags = parm->flags;
	p->tcf_action = parm->action;
	if (keys) {
		kfree(p->tcfp_keys);
		p->tcfp_keys = keys;
		p->tcfp_nkeys = parm->nkeys;
	}
	memcpy(p->tcfp_keys, parm->keys, ksize);
	spin_unlock_bh(&p->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &pedit_hash_info);
	return ret;
}
Example #13
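/*
 * Validate a single netlink attribute against its nla_policy entry,
 * applying the type-specific length rules (flag, strings, binary, nested).
 */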
static int validate_nla(const struct nlattr *nla, int maxtype,
			const struct nla_policy *policy)
{
	const struct nla_policy *pt;
	int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);

	if (type <= 0 || type > maxtype)
		return 0;

	pt = &policy[type];

	BUG_ON(pt->type > NLA_TYPE_MAX);

	switch (pt->type) {
	case NLA_FLAG:
		if (attrlen > 0)
			return -ERANGE;
		break;

	case NLA_NUL_STRING:
		if (pt->len)
			minlen = min_t(int, attrlen, pt->len + 1);
		else
			minlen = attrlen;

		if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
			return -EINVAL;
		/* fall through */

	case NLA_STRING:
		if (attrlen < 1)
			return -ERANGE;

		if (pt->len) {
			char *buf = nla_data(nla);

			if (buf[attrlen - 1] == '\0')
				attrlen--;

			if (attrlen > pt->len)
				return -ERANGE;
		}
		break;

	case NLA_BINARY:
		if (pt->len && attrlen > pt->len)
			return -ERANGE;
		break;

	case NLA_NESTED_COMPAT:
		if (attrlen < pt->len)
			return -ERANGE;
		if (attrlen < NLA_ALIGN(pt->len))
			break;
		if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
			return -ERANGE;
		nla = nla_data(nla) + NLA_ALIGN(pt->len);
		if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
			return -ERANGE;
		break;
	case NLA_NESTED:
		/* a nested attribute is allowed to be empty; if it's not,
		 * it must have a size of at least NLA_HDRLEN.
		 */
		if (attrlen == 0)
			break;
	default:
		if (pt->len)
			minlen = pt->len;
		else if (pt->type != NLA_UNSPEC)
			minlen = nla_attr_minlen[pt->type];

		if (attrlen < minlen)
			return -ERANGE;
	}

	return 0;
}
Example #14
File: tc.c Project: HongweiBi/libnl
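/*
 * Parse a traffic-control netlink message into a struct rtnl_tc: kind,
 * tcmsg header fields, options, statistics (TCA_STATS2 or the legacy
 * TCA_STATS/TCA_XSTATS) and any type-specific payload via to_msg_parser().
 */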
int rtnl_tc_msg_parse(struct nlmsghdr *n, struct rtnl_tc *tc)
{
	struct nl_cache *link_cache;
	struct rtnl_tc_ops *ops;
	struct nlattr *tb[TCA_MAX + 1];
	char kind[TCKINDSIZ];
	struct tcmsg *tm;
	int err;

	tc->ce_msgtype = n->nlmsg_type;

	err = nlmsg_parse(n, sizeof(*tm), tb, TCA_MAX, tc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_KIND] == NULL)
		return -NLE_MISSING_ATTR;

	nla_strlcpy(kind, tb[TCA_KIND], sizeof(kind));
	rtnl_tc_set_kind(tc, kind);

	tm = nlmsg_data(n);
	tc->tc_family  = tm->tcm_family;
	tc->tc_ifindex = tm->tcm_ifindex;
	tc->tc_handle  = tm->tcm_handle;
	tc->tc_parent  = tm->tcm_parent;
	tc->tc_info    = tm->tcm_info;

	tc->ce_mask |= (TCA_ATTR_FAMILY | TCA_ATTR_IFINDEX | TCA_ATTR_HANDLE|
		        TCA_ATTR_PARENT | TCA_ATTR_INFO);

	if (tb[TCA_OPTIONS]) {
		tc->tc_opts = nl_data_alloc_attr(tb[TCA_OPTIONS]);
		if (!tc->tc_opts)
			return -NLE_NOMEM;
		tc->ce_mask |= TCA_ATTR_OPTS;
	}

	if (tb[TCA_STATS2]) {
		struct nlattr *tbs[TCA_STATS_MAX + 1];

		err = nla_parse_nested(tbs, TCA_STATS_MAX, tb[TCA_STATS2],
				       tc_stats2_policy);
		if (err < 0)
			return err;

		if (tbs[TCA_STATS_BASIC]) {
			struct gnet_stats_basic *bs;
			
			bs = nla_data(tbs[TCA_STATS_BASIC]);
			tc->tc_stats[RTNL_TC_BYTES]	= bs->bytes;
			tc->tc_stats[RTNL_TC_PACKETS]	= bs->packets;
		}

		if (tbs[TCA_STATS_RATE_EST]) {
			struct gnet_stats_rate_est *re;

			re = nla_data(tbs[TCA_STATS_RATE_EST]);
			tc->tc_stats[RTNL_TC_RATE_BPS]	= re->bps;
			tc->tc_stats[RTNL_TC_RATE_PPS]	= re->pps;
		}
		
		if (tbs[TCA_STATS_QUEUE]) {
			struct gnet_stats_queue *q;

			q = nla_data(tbs[TCA_STATS_QUEUE]);
			tc->tc_stats[RTNL_TC_QLEN]	= q->qlen;
			tc->tc_stats[RTNL_TC_BACKLOG]	= q->backlog;
			tc->tc_stats[RTNL_TC_DROPS]	= q->drops;
			tc->tc_stats[RTNL_TC_REQUEUES]	= q->requeues;
			tc->tc_stats[RTNL_TC_OVERLIMITS]	= q->overlimits;
		}

		tc->ce_mask |= TCA_ATTR_STATS;
		
		if (tbs[TCA_STATS_APP]) {
			tc->tc_xstats = nl_data_alloc_attr(tbs[TCA_STATS_APP]);
			if (tc->tc_xstats == NULL)
				return -NLE_NOMEM;
		} else
			goto compat_xstats;
	} else {
		if (tb[TCA_STATS]) {
			struct tc_stats *st = nla_data(tb[TCA_STATS]);

			tc->tc_stats[RTNL_TC_BYTES]	= st->bytes;
			tc->tc_stats[RTNL_TC_PACKETS]	= st->packets;
			tc->tc_stats[RTNL_TC_RATE_BPS]	= st->bps;
			tc->tc_stats[RTNL_TC_RATE_PPS]	= st->pps;
			tc->tc_stats[RTNL_TC_QLEN]	= st->qlen;
			tc->tc_stats[RTNL_TC_BACKLOG]	= st->backlog;
			tc->tc_stats[RTNL_TC_DROPS]	= st->drops;
			tc->tc_stats[RTNL_TC_OVERLIMITS]= st->overlimits;

			tc->ce_mask |= TCA_ATTR_STATS;
		}

compat_xstats:
		if (tb[TCA_XSTATS]) {
			tc->tc_xstats = nl_data_alloc_attr(tb[TCA_XSTATS]);
			if (tc->tc_xstats == NULL)
				return -NLE_NOMEM;
			tc->ce_mask |= TCA_ATTR_XSTATS;
		}
	}

	ops = rtnl_tc_get_ops(tc);
	if (ops && ops->to_msg_parser) {
		void *data = rtnl_tc_data(tc);

		if (!data)
			return -NLE_NOMEM;

		err = ops->to_msg_parser(tc, data);
		if (err < 0)
			return err;
	}

	if ((link_cache = __nl_cache_mngt_require("route/link"))) {
		struct rtnl_link *link;

		if ((link = rtnl_link_get(link_cache, tc->tc_ifindex))) {
			rtnl_tc_set_link(tc, link);

			/* rtnl_tc_set_link incs refcnt */
			rtnl_link_put(link);
		}
	}

	return 0;
}
Example #15
/*
 * Main event dispatcher. Called from other parts and drivers.
 * Send the event on the appropriate channels.
 * May be called from interrupt context.
 */
void wireless_send_event(struct net_device *	dev,
			 unsigned int		cmd,
			 union iwreq_data *	wrqu,
			 char *		extra)
{
	const struct iw_ioctl_description *	descr = NULL;
	int extra_len = 0;
	struct iw_event  *event;		/* Mallocated whole event */
	int event_len;				/* Its size */
	int hdr_len;				/* Size of the event header */
	int wrqu_off = 0;			/* Offset in wrqu */
	/* Don't "optimise" the following variable, it will crash */
	unsigned	cmd_index;		/* *MUST* be unsigned */
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
#ifdef CONFIG_COMPAT
	struct __compat_iw_event *compat_event;
	struct compat_iw_point compat_wrqu;
	struct sk_buff *compskb;
#endif

	/*
	 * Nothing in the kernel sends scan events with data, be safe.
	 * This is necessary because we cannot fix up scan event data
	 * for compat, due to being contained in 'extra', but normally
	 * applications are required to retrieve the scan data anyway
	 * and no data is included in the event, this codifies that
	 * practice.
	 */
	if (WARN_ON(cmd == SIOCGIWSCAN && extra))
		extra = NULL;

	/* Get the description of the Event */
	if (cmd <= SIOCIWLAST) {
		cmd_index = IW_IOCTL_IDX(cmd);
		if (cmd_index < standard_ioctl_num)
			descr = &(standard_ioctl[cmd_index]);
	} else {
		cmd_index = IW_EVENT_IDX(cmd);
		if (cmd_index < standard_event_num)
			descr = &(standard_event[cmd_index]);
	}
	/* Don't accept unknown events */
	if (descr == NULL) {
		/* Note : we don't return an error to the driver, because
		 * the driver would not know what to do about it. It can't
		 * return an error to the user, because the event is not
		 * initiated by a user request.
		 * The best the driver could do is to log an error message.
		 * We will do it ourselves instead...
		 */
		printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
		       dev->name, cmd);
		return;
	}

	/* Check extra parameters and set extra_len */
	if (descr->header_type == IW_HEADER_TYPE_POINT) {
		/* Check if the number of tokens fits within bounds */
		if (wrqu->data.length > descr->max_tokens) {
			printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length);
			return;
		}
		if (wrqu->data.length < descr->min_tokens) {
			printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length);
			return;
		}
		/* Calculate extra_len - extra is NULL for restricted events */
		if (extra != NULL)
			extra_len = wrqu->data.length * descr->token_size;
		/* Always at an offset in wrqu */
		wrqu_off = IW_EV_POINT_OFF;
	}

	/* Total length of the event */
	hdr_len = event_type_size[descr->header_type];
	event_len = hdr_len + extra_len;

	/*
	 * The problem for 64/32 bit.
	 *
	 * On 64-bit, a regular event is laid out as follows:
	 *      |  0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |
	 *      | event.len | event.cmd |     p a d d i n g     |
	 *      | wrqu data ... (with the correct size)         |
	 *
	 * This padding exists because we manipulate event->u,
	 * and 'event' is not packed.
	 *
	 * An iw_point event is laid out like this instead:
	 *      |  0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |
	 *      | event.len | event.cmd |     p a d d i n g     |
	 *      | iwpnt.len | iwpnt.flg |     p a d d i n g     |
	 *      | extra data  ...
	 *
	 * The second padding exists because struct iw_point is extended,
	 * but this depends on the platform...
	 *
	 * On 32-bit, all the padding shouldn't be there.
	 */

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	/* Send via the RtNetlink event channel */
	nlh = rtnetlink_ifinfo_prep(dev, skb);
	if (WARN_ON(!nlh)) {
		kfree_skb(skb);
		return;
	}

	/* Add the wireless events in the netlink packet */
	nla = nla_reserve(skb, IFLA_WIRELESS, event_len);
	if (!nla) {
		kfree_skb(skb);
		return;
	}
	event = nla_data(nla);

	/* Fill event - first clear to avoid data leaking */
	memset(event, 0, hdr_len);
	event->len = event_len;
	event->cmd = cmd;
	memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN);
	if (extra_len)
		memcpy(((char *) event) + hdr_len, extra, extra_len);

	nlmsg_end(skb, nlh);
#ifdef CONFIG_COMPAT
	hdr_len = compat_event_type_size[descr->header_type];
	event_len = hdr_len + extra_len;

	compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!compskb) {
		kfree_skb(skb);
		return;
	}

	/* Send via the RtNetlink event channel */
	nlh = rtnetlink_ifinfo_prep(dev, compskb);
	if (WARN_ON(!nlh)) {
		kfree_skb(skb);
		kfree_skb(compskb);
		return;
	}

	/* Add the wireless events in the netlink packet */
	nla = nla_reserve(compskb, IFLA_WIRELESS, event_len);
	if (!nla) {
		kfree_skb(skb);
		kfree_skb(compskb);
		return;
	}
	compat_event = nla_data(nla);

	compat_event->len = event_len;
	compat_event->cmd = cmd;
	if (descr->header_type == IW_HEADER_TYPE_POINT) {
		compat_wrqu.length = wrqu->data.length;
		compat_wrqu.flags = wrqu->data.flags;
		memcpy(&compat_event->pointer,
			((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
			hdr_len - IW_EV_COMPAT_LCP_LEN);
		if (extra_len)
			memcpy(((char *) compat_event) + hdr_len,
				extra, extra_len);
	} else {
		/* extra_len must be zero, so no if (extra) needed */
		memcpy(&compat_event->pointer, wrqu,
			hdr_len - IW_EV_COMPAT_LCP_LEN);
	}

	nlmsg_end(compskb, nlh);

	skb_shinfo(skb)->frag_list = compskb;
#endif

	skb_queue_tail(&wireless_nlevent_queue, skb);
	tasklet_schedule(&wireless_nlevent_tasklet);

}
Example #16
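/*
 * Configure a TBF qdisc using kernel rate tables: compute max_size from the
 * rate/peakrate tables, optionally attach a bfifo child and swap the new
 * rate tables in under the qdisc tree lock.
 */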
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	swap(q->R_tab, rtab);
	swap(q->P_tab, ptab);

	sch_tree_unlock(sch);
	err = 0;
#if defined(CONFIG_ASF_EGRESS_SHAPER) || defined(CONFIG_ASF_HW_SHAPER)
	_tbf_add_hook(sch, qopt->rate.rate, qopt->limit,
			qopt->buffer, qopt->rate.mpu);
#endif

done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
Example #17
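/*
 * L2TP_CMD_SESSION_CREATE handler: locate the parent tunnel, gather the
 * session configuration (cookies, sequencing, MTU/MRU, pseudowire type and
 * its required attributes) and dispatch to the pseudowire's session_create
 * operation.
 */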
static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id = 0;
	u32 session_id;
	u32 peer_session_id;
	int ret = 0;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct l2tp_session_cfg cfg = { 0, };
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel) {
		ret = -ENODEV;
		goto out;
	}

	if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
		ret = -EINVAL;
		goto out;
	}
	session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
	session = l2tp_session_find(net, tunnel, session_id);
	if (session) {
		ret = -EEXIST;
		goto out;
	}

	if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);

	if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
	if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	if (tunnel->version > 2) {
		if (info->attrs[L2TP_ATTR_OFFSET])
			cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);

		if (info->attrs[L2TP_ATTR_DATA_SEQ])
			cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);

		cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
		if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
			cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);

		cfg.l2specific_len = 4;
		if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
			cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);

		if (info->attrs[L2TP_ATTR_COOKIE]) {
			u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
			if (len > 8) {
				ret = -EINVAL;
				goto out;
			}
			cfg.cookie_len = len;
			memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
		}
		if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
			u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
			if (len > 8) {
				ret = -EINVAL;
				goto out;
			}
			cfg.peer_cookie_len = len;
			memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
		}
		if (info->attrs[L2TP_ATTR_IFNAME])
			cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);

		if (info->attrs[L2TP_ATTR_VLAN_ID])
			cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]);
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	if (info->attrs[L2TP_ATTR_RECV_SEQ])
		cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);

	if (info->attrs[L2TP_ATTR_SEND_SEQ])
		cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);

	if (info->attrs[L2TP_ATTR_LNS_MODE])
		cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);

	if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
		cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);

	if (info->attrs[L2TP_ATTR_MTU])
		cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);

	if (info->attrs[L2TP_ATTR_MRU])
		cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);

	if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
	    (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
		ret = -EPROTONOSUPPORT;
		goto out;
	}

	/* Check that pseudowire-specific params are present */
	switch (cfg.pw_type) {
	case L2TP_PWTYPE_NONE:
		break;
	case L2TP_PWTYPE_ETH_VLAN:
		if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
			ret = -EINVAL;
			goto out;
		}
		break;
	case L2TP_PWTYPE_ETH:
		break;
	case L2TP_PWTYPE_PPP:
	case L2TP_PWTYPE_PPP_AC:
		break;
	case L2TP_PWTYPE_IP:
	default:
		ret = -EPROTONOSUPPORT;
		break;
	}

	ret = -EPROTONOSUPPORT;
	if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
		ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
			session_id, peer_session_id, &cfg);

out:
	return ret;
}
Example #18
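/*
 * Build one NFULNL_MSG_PACKET message for a logged packet: packet header,
 * prefix, input/output interfaces (including bridge ports), mark, hardware
 * header, timestamp, socket UID/GID, sequence numbers and an optional slice
 * of payload.
 */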
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(skb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(skb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)) ||
		    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
			    skb_mac_header(skb)))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk->sk_state != TCP_TIME_WAIT) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&global_seq))))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			return -1;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
Example #19
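/*
 * Initialise (or update) a gact action: read TCA_GACT_PARMS (and the
 * optional probability parameters when CONFIG_GACT_PROB is enabled), then
 * create or reuse the action hash entry.
 */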
static int tcf_gact_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action *a,
			 int ovr, int bind)
{
	struct nlattr *tb[TCA_GACT_MAX + 1];
	struct tc_gact *parm;
	struct tcf_gact *gact;
	int ret = 0;
	int err;
#ifdef CONFIG_GACT_PROB
	struct tc_gact_p *p_parm = NULL;
#endif

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GACT_MAX, nla, gact_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GACT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_GACT_PARMS]);

#ifndef CONFIG_GACT_PROB
	if (tb[TCA_GACT_PROB] != NULL)
		return -EOPNOTSUPP;
#else
	if (tb[TCA_GACT_PROB]) {
		p_parm = nla_data(tb[TCA_GACT_PROB]);
		if (p_parm->ptype >= MAX_RAND)
			return -EINVAL;
	}
#endif

	if (!tcf_hash_check(parm->index, a, bind)) {
		ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
				      bind, true);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;
		tcf_hash_release(a, bind);
		if (!ovr)
			return -EEXIST;
	}

	gact = to_gact(a);

	ASSERT_RTNL();
	gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
	if (p_parm) {
		gact->tcfg_paction = p_parm->paction;
		gact->tcfg_pval    = max_t(u16, 1, p_parm->pval);
		/* Make sure tcfg_pval is written before tcfg_ptype
		 * coupled with smp_rmb() in gact_net_rand() & gact_determ()
		 */
		smp_wmb();
		gact->tcfg_ptype   = p_parm->ptype;
	}
#endif
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(a);
	return ret;
}
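
The smp_wmb() above pairs with a read barrier on the consumer side. A kernel-context sketch of that reader, modeled on the gact_net_rand() the comment mentions (not the verbatim source), shows why the ordering matters: tcfg_pval is guaranteed non-zero before it is used as a divisor.

static int gact_net_rand_sketch(struct tcf_gact *gact)
{
	/* pairs with the smp_wmb() in tcf_gact_init(): once the new
	 * tcfg_ptype is visible, the non-zero tcfg_pval written before it
	 * is visible too, so the modulo below never divides by zero. */
	smp_rmb();
	if (prandom_u32() % gact->tcfg_pval)
		return gact->tcf_action;
	return gact->tcfg_paction;
}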
Exemplo n.º 20
0
static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
		    const char **helper, bool log)
{
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int maxlen = ovs_ct_attr_lens[type].maxlen;
		int minlen = ovs_ct_attr_lens[type].minlen;

		if (type > OVS_CT_ATTR_MAX) {
			OVS_NLERR(log,
				  "Unknown conntrack attr (type=%d, max=%d)",
				  type, OVS_CT_ATTR_MAX);
			return -EINVAL;
		}
		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
			OVS_NLERR(log,
				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
				  type, nla_len(a), maxlen);
			return -EINVAL;
		}

		switch (type) {
		case OVS_CT_ATTR_COMMIT:
			info->commit = true;
			break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
		case OVS_CT_ATTR_ZONE:
			info->zone.id = nla_get_u16(a);
			break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
		case OVS_CT_ATTR_MARK: {
			struct md_mark *mark = nla_data(a);

			if (!mark->mask) {
				OVS_NLERR(log, "ct_mark mask cannot be 0");
				return -EINVAL;
			}
			info->mark = *mark;
			break;
		}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		case OVS_CT_ATTR_LABELS: {
			struct md_labels *labels = nla_data(a);

			if (!labels_nonzero(&labels->mask)) {
				OVS_NLERR(log, "ct_labels mask cannot be 0");
				return -EINVAL;
			}
			info->labels = *labels;
			break;
		}
#endif
		case OVS_CT_ATTR_HELPER:
			*helper = nla_data(a);
			if (!memchr(*helper, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack helper");
				return -EINVAL;
			}
			break;
		default:
			OVS_NLERR(log, "Unknown conntrack attr (%d)",
				  type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
		return -EINVAL;
	}

	return 0;
}
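
For context, the nested list that parse_ct() walks is built by a userspace sender. A minimal sketch with libnl-3 and the OVS_CT_ATTR_* enum from <linux/openvswitch.h> (ovs-vswitchd itself uses its own ofpbuf helpers, so this is illustrative only):

#include <errno.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/openvswitch.h>

/* Append a "ct(commit,zone=5)"-style action to an OVS netlink message. */
static int put_ct_action(struct nl_msg *msg)
{
	struct nlattr *ct = nla_nest_start(msg, OVS_ACTION_ATTR_CT);

	if (!ct)
		return -ENOMEM;
	if (nla_put_flag(msg, OVS_CT_ATTR_COMMIT) ||	/* zero-length flag */
	    nla_put_u16(msg, OVS_CT_ATTR_ZONE, 5))
		return -ENOMEM;
	nla_nest_end(msg, ct);
	return 0;
}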
Exemplo n.º 21
0
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    void *iwbuf, int iwbuflen, int type, u32 pid,
			    u32 seq, u32 change, unsigned int flags)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
	NLA_PUT_U32(skb, IFLA_WEIGHT, dev->weight);
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
	NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	if (dev->master)
		NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);

	if (dev->qdisc_sleeping)
		NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc_sleeping->ops->id);

	if (1) {
		struct rtnl_link_ifmap map = {
			.mem_start   = dev->mem_start,
			.mem_end     = dev->mem_end,
			.base_addr   = dev->base_addr,
			.irq         = dev->irq,
			.dma         = dev->dma,
			.port        = dev->if_port,
		};
		NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
	}

	if (dev->addr_len) {
		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
		NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
	}

	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);
		if (stats) {
			struct nlattr *attr;

			attr = nla_reserve(skb, IFLA_STATS,
					   sizeof(struct rtnl_link_stats));
			if (attr == NULL)
				goto nla_put_failure;

			copy_rtnl_link_stats(nla_data(attr), stats);
		}
	}

	if (iwbuf)
		NLA_PUT(skb, IFLA_WIRELESS, iwbuflen, iwbuf);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->args[0];
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
		if (idx < s_idx)
			continue;
		if (rtnl_fill_ifinfo(skb, dev, NULL, 0, RTM_NEWLINK,
				     NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0)
			break;
	}
	read_unlock(&dev_base_lock);
	cb->args[0] = idx;

	return skb->len;
}
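
The dump side of this pair can be exercised from userspace with a plain RTM_GETLINK request. A minimal sketch with libnl-3 (illustrative only, error handling trimmed) that requests the dump built above and prints interface names:

#include <stdio.h>
#include <sys/socket.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/rtnetlink.h>

static int link_cb(struct nl_msg *msg, void *arg)
{
	struct nlmsghdr *nlh = nlmsg_hdr(msg);
	struct ifinfomsg *ifm = nlmsg_data(nlh);
	struct nlattr *tb[IFLA_MAX + 1];

	(void)arg;
	/* attributes start right after struct ifinfomsg */
	nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, NULL);
	if (tb[IFLA_IFNAME])
		printf("%d: %s\n", ifm->ifi_index,
		       nla_get_string(tb[IFLA_IFNAME]));
	return NL_OK;
}

int dump_links(void)
{
	struct ifinfomsg ifm = { .ifi_family = AF_UNSPEC };
	struct nl_sock *sk = nl_socket_alloc();

	nl_connect(sk, NETLINK_ROUTE);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, link_cb, NULL);
	nl_send_simple(sk, RTM_GETLINK, NLM_F_DUMP, &ifm, sizeof(ifm));
	nl_recvmsgs_default(sk);
	nl_socket_free(sk);
	return 0;
}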
Exemplo n.º 22
0
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (qdisc == NULL) {
			err = -ENOMEM;
			goto err;
		}
		priv->qdiscs[i] = qdisc;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc otherwise use the
	 * supplied and verified mapping
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			goto err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mqprio_destroy(sch);
	return err;
}
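
The tc_mqprio_qopt blob parsed above is filled in by the configuring tool. A sketch of how a tc-like userspace program might populate it for two traffic classes over eight TX queues (values are illustrative only):

#include <string.h>
#include <linux/pkt_sched.h>

static void fill_mqprio_opt(struct tc_mqprio_qopt *opt)
{
	int i;

	memset(opt, 0, sizeof(*opt));
	opt->num_tc = 2;
	opt->hw = 0;			/* software queue mapping */

	/* priorities 0-3 -> TC 0, priorities 4-15 -> TC 1 */
	for (i = 0; i < TC_QOPT_BITMASK + 1; i++)
		opt->prio_tc_map[i] = (i < 4) ? 0 : 1;

	/* TC 0: queues 0-3, TC 1: queues 4-7 */
	opt->count[0] = 4; opt->offset[0] = 0;
	opt->count[1] = 4; opt->offset[1] = 4;
}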
Exemplo n.º 23
0
static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct tcf_common *pc;
	struct xt_entry_target *td, *t;
	char *tname;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_HOOK] == NULL)
		return -EINVAL;
	if (tb[TCA_IPT_TARG] == NULL)
		return -EINVAL;

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
		return -EINVAL;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	pc = tcf_hash_check(index, a, bind, &ipt_hash_info);
	if (!pc) {
		pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
				     &ipt_idx_gen, &ipt_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		ret = ACT_P_CREATED;
	} else {
		if (!ovr) {
			tcf_ipt_release(to_ipt(pc), bind);
			return -EEXIST;
		}
	}
	ipt = to_ipt(pc);

	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(t, tname, hook);
	if (err < 0)
		goto err3;

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t     = t;
	ipt->tcfi_hook  = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &ipt_hash_info);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	kfree(pc);
	return err;
}
Exemplo n.º 24
0
static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
			 struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	struct tcf_common *pc;
	u32 flags = 0, *priority = NULL;
	u16 *queue_mapping = NULL;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}
	if (!flags)
		return -EINVAL;

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
				     &skbedit_idx_gen, &skbedit_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);

		d = to_skbedit(pc);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &skbedit_hash_info);
			return -EEXIST;
		}
	}

	spin_lock_bh(&d->tcf_lock);

	d->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		d->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		d->queue_mapping = *queue_mapping;
	d->tcf_action = parm->action;

	spin_unlock_bh(&d->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &skbedit_hash_info);
	return ret;
}
Exemplo n.º 25
0
static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
			   struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct tcf_common *pc;
	struct net_device *dev = NULL;
	int ret = 0, err;
	int ok_push = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_MIRRED_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_MIRRED_PARMS]);

	if (parm->ifindex) {
		dev = __dev_get_by_index(&init_net, parm->ifindex);
		if (dev == NULL)
			return -ENODEV;
		switch (dev->type) {
			case ARPHRD_TUNNEL:
			case ARPHRD_TUNNEL6:
			case ARPHRD_SIT:
			case ARPHRD_IPGRE:
			case ARPHRD_VOID:
			case ARPHRD_NONE:
				ok_push = 0;
				break;
			default:
				ok_push = 1;
				break;
		}
	}

	pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
	if (!pc) {
		if (!parm->ifindex)
			return -EINVAL;
		pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
				     &mirred_idx_gen, &mirred_hash_info);
		if (unlikely(!pc))
			return -ENOMEM;
		ret = ACT_P_CREATED;
	} else {
		if (!ovr) {
			tcf_mirred_release(to_mirred(pc), bind);
			return -EEXIST;
		}
	}
	m = to_mirred(pc);

	spin_lock_bh(&m->tcf_lock);
	m->tcf_action = parm->action;
	m->tcfm_eaction = parm->eaction;
	if (parm->ifindex) {
		m->tcfm_ifindex = parm->ifindex;
		if (ret != ACT_P_CREATED)
			dev_put(m->tcfm_dev);
		m->tcfm_dev = dev;
		dev_hold(dev);
		m->tcfm_ok_push = ok_push;
	}
	spin_unlock_bh(&m->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &mirred_hash_info);

	return ret;
}
Exemplo n.º 26
0
static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
					 struct nlattr **attrs)
{
	char *name;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
	int err;

	if (!attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
			       NULL);
	if (err)
		return err;

	if (!link[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
			       link[TIPC_NLA_LINK_PROP], NULL);
	if (err)
		return err;

	if (!link[TIPC_NLA_LINK_STATS])
		return -EINVAL;

	err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
			       link[TIPC_NLA_LINK_STATS], NULL);
	if (err)
		return err;

	name = (char *)TLV_DATA(msg->req);
	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
		return 0;

	tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
			 nla_data(link[TIPC_NLA_LINK_NAME]));

	if (link[TIPC_NLA_LINK_BROADCAST]) {
		__fill_bc_link_stat(msg, prop, stats);
		return 0;
	}

	if (link[TIPC_NLA_LINK_ACTIVE])
		tipc_tlv_sprintf(msg->rep, "  ACTIVE");
	else if (link[TIPC_NLA_LINK_UP])
		tipc_tlv_sprintf(msg->rep, "  STANDBY");
	else
		tipc_tlv_sprintf(msg->rep, "  DEFUNCT");

	tipc_tlv_sprintf(msg->rep, "  MTU:%u  Priority:%u",
			 nla_get_u32(link[TIPC_NLA_LINK_MTU]),
			 nla_get_u32(prop[TIPC_NLA_PROP_PRIO]));

	tipc_tlv_sprintf(msg->rep, "  Tolerance:%u ms  Window:%u packets\n",
			 nla_get_u32(prop[TIPC_NLA_PROP_TOL]),
			 nla_get_u32(prop[TIPC_NLA_PROP_WIN]));

	tipc_tlv_sprintf(msg->rep,
			 "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			 nla_get_u32(link[TIPC_NLA_LINK_RX]) -
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));

	tipc_tlv_sprintf(msg->rep,
			 "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			 nla_get_u32(link[TIPC_NLA_LINK_TX]) -
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));

	tipc_tlv_sprintf(msg->rep,
			 "  TX profile sample:%u packets  average:%u octets\n",
			 nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]),
			 nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) /
			 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]));

	tipc_tlv_sprintf(msg->rep,
			 "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ",
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));

	tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));

	tipc_tlv_sprintf(msg->rep,
			 "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
			 nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));

	tipc_tlv_sprintf(msg->rep,
			 "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));

	tipc_tlv_sprintf(msg->rep,
			 "  Congestion link:%u  Send queue max:%u avg:%u",
			 nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
			 nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));

	return 0;
}
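
perc() is used throughout the dump above but is not part of this excerpt; in TIPC it is a small rounding helper along these lines (a kernel-side sketch, not the verbatim source):

static inline u32 perc(u32 count, u32 total)
{
	/* integer percentage of count in total, rounded to nearest */
	return (count * 100 + (total / 2)) / total;
}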
Exemplo n.º 27
0
Arquivo: wowlan.c Projeto: resfi/resfi
static int print_wowlan_handler(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[NL80211_ATTR_MAX + 1];
	struct nlattr *trig[NUM_NL80211_WOWLAN_TRIG];
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *pattern;
	int rem_pattern;

	nla_parse(attrs, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);

	if (!attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
		printf("WoWLAN is disabled.\n");
		return NL_SKIP;
	}

	/* XXX: use policy */
	nla_parse(trig, MAX_NL80211_WOWLAN_TRIG,
		  nla_data(attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
		  nla_len(attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
		  NULL);

	printf("WoWLAN is enabled:\n");
	if (trig[NL80211_WOWLAN_TRIG_ANY])
		printf(" * wake up on special any trigger\n");
	if (trig[NL80211_WOWLAN_TRIG_DISCONNECT])
		printf(" * wake up on disconnect\n");
	if (trig[NL80211_WOWLAN_TRIG_MAGIC_PKT])
		printf(" * wake up on magic packet\n");
	if (trig[NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE])
		printf(" * wake up on GTK rekeying failure\n");
	if (trig[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST])
		printf(" * wake up on EAP identity request\n");
	if (trig[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE])
		printf(" * wake up on 4-way handshake\n");
	if (trig[NL80211_WOWLAN_TRIG_RFKILL_RELEASE])
		printf(" * wake up on RF-kill release\n");
	if (trig[NL80211_WOWLAN_TRIG_NET_DETECT]) {
		struct nlattr *match, *freq,
			*nd[NUM_NL80211_ATTR], *tb[NUM_NL80211_ATTR];
		int rem_match;

		printf(" * wake up on network detection\n");
		nla_parse(nd, NUM_NL80211_ATTR,
			  nla_data(trig[NL80211_WOWLAN_TRIG_NET_DETECT]),
			  nla_len(trig[NL80211_WOWLAN_TRIG_NET_DETECT]), NULL);

		if (nd[NL80211_ATTR_SCHED_SCAN_INTERVAL])
			printf("\tscan interval: %u msecs\n",
			       nla_get_u32(nd[NL80211_ATTR_SCHED_SCAN_INTERVAL]));

		if (nd[NL80211_ATTR_SCHED_SCAN_DELAY])
			printf("\tinitial scan delay: %u secs\n",
			       nla_get_u32(nd[NL80211_ATTR_SCHED_SCAN_DELAY]));

		if (nd[NL80211_ATTR_SCHED_SCAN_MATCH]) {
			printf("\tmatches:\n");
			nla_for_each_nested(match,
					    nd[NL80211_ATTR_SCHED_SCAN_MATCH],
					    rem_match) {
				nla_parse(tb, NUM_NL80211_ATTR, nla_data(match),
					  nla_len(match),
					  NULL);
				printf("\t\tSSID: ");
				print_ssid_escaped(
					nla_len(tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]),
					nla_data(tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]));
				printf("\n");
			}
		}
		if (nd[NL80211_ATTR_SCAN_FREQUENCIES]) {
			printf("\tfrequencies:");
			nla_for_each_nested(freq,
					    nd[NL80211_ATTR_SCAN_FREQUENCIES],
					    rem_match) {
				printf(" %d", nla_get_u32(freq));
			}
			printf("\n");
		}
	}

	/* the full tool goes on to print further triggers (e.g. packet
	 * patterns and TCP wakeup) before returning */
	return NL_SKIP;
}
Exemplo n.º 28
0
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
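
The resize path above sizes the ring to a power of two so the head/tail arithmetic can wrap with a mask instead of a modulo. A small kernel-side sketch of that sizing with worked numbers (illustrative values only):

static unsigned int choke_ring_mask(unsigned int limit)
{
	/* e.g. limit = 1000: roundup_pow_of_two(1001) = 1024, mask = 1023,
	 * so "(head + 1) & mask" walks a 1024-slot ring, indices 0..1023 */
	return roundup_pow_of_two(limit + 1) - 1;
}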
Exemplo n.º 29
0
static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
			   struct nlattr *peer,
			   struct cfg80211_pmsr_request_peer *out,
			   struct genl_info *info)
{
	struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
	struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
	struct nlattr *treq;
	int err, rem;

	/* no validation needed - was already done via nested policy */
	nla_parse_nested(tb, NL80211_PMSR_PEER_ATTR_MAX, peer, NULL, NULL);

	if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
	    !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
	    !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
		NL_SET_ERR_MSG_ATTR(info->extack, peer,
				    "insufficient peer data");
		return -EINVAL;
	}

	memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);

	/* reuse info->attrs */
	memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
	/* need to validate here, we don't want to have validation recursion */
	err = nla_parse_nested(info->attrs, NL80211_ATTR_MAX,
			       tb[NL80211_PMSR_PEER_ATTR_CHAN],
			       nl80211_policy, info->extack);
	if (err)
		return err;

	err = nl80211_parse_chandef(rdev, info, &out->chandef);
	if (err)
		return err;

	/* no validation needed - was already done via nested policy */
	nla_parse_nested(req, NL80211_PMSR_REQ_ATTR_MAX,
			 tb[NL80211_PMSR_PEER_ATTR_REQ],
			 NULL, NULL);

	if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_PEER_ATTR_REQ],
				    "missing request type/data");
		return -EINVAL;
	}

	if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
		out->report_ap_tsf = true;

	if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
				    "reporting AP TSF is not supported");
		return -EINVAL;
	}

	nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
		switch (nla_type(treq)) {
		case NL80211_PMSR_TYPE_FTM:
			err = pmsr_parse_ftm(rdev, treq, out, info);
			break;
		default:
			NL_SET_ERR_MSG_ATTR(info->extack, treq,
					    "unsupported measurement type");
			err = -EINVAL;
		}
	}

	if (err)
		return err;

	return 0;
}
Exemplo n.º 30
0
static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
					  NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
					peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		qdisc_put(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}