static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index    = t->tcf_index,
		.refcnt   = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action   = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		if (((key->tun_flags & TUNNEL_KEY) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		      nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				   key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_tunnel_key_ops = {
	.kind		=	"tunnel_key",
	.type		=	TCA_ACT_TUNNEL_KEY,
	.owner		=	THIS_MODULE,
	.act		=	tunnel_key_act,
	.dump		=	tunnel_key_dump,
	.init		=	tunnel_key_init,
	.cleanup	=	tunnel_key_release,
	.walk		=	tunnel_key_walker,
	.lookup		=	tunnel_key_search,
	.size		=	sizeof(struct tcf_tunnel_key),
};

static __net_init int tunnel_key_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tc_action_net_init(tn, &act_tunnel_key_ops);
}

static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}

static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id   = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <*****@*****.**>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");
Example #2
static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   const struct smc_diag_req *req,
			   struct nlattr *bc)
{
	struct smc_sock *smc = smc_sk(sk);
	struct smc_diag_fallback fallback;
	struct user_namespace *user_ns;
	struct smc_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	smc_diag_msg_common_fill(r, sk);
	r->diag_state = sk->sk_state;
	if (smc->use_fallback)
		r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
	else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
		r->diag_mode = SMC_DIAG_MODE_SMCD;
	else
		r->diag_mode = SMC_DIAG_MODE_SMCR;
	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
		goto errout;

	fallback.reason = smc->fallback_rsn;
	fallback.peer_diagnosis = smc->peer_diagnosis;
	if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
		goto errout;

	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
	    smc->conn.alert_token_local) {
		struct smc_connection *conn = &smc->conn;
		struct smc_diag_conninfo cinfo = {
			.token = conn->alert_token_local,
			.sndbuf_size = conn->sndbuf_desc ?
				conn->sndbuf_desc->len : 0,
			.rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
			.peer_rmbe_size = conn->peer_rmbe_size,

			.rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
			.rx_prod.count = conn->local_rx_ctrl.prod.count,
			.rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
			.rx_cons.count = conn->local_rx_ctrl.cons.count,

			.tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
			.tx_prod.count = conn->local_tx_ctrl.prod.count,
			.tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
			.tx_cons.count = conn->local_tx_ctrl.cons.count,

			.tx_prod_flags =
				*(u8 *)&conn->local_tx_ctrl.prod_flags,
			.tx_conn_state_flags =
				*(u8 *)&conn->local_tx_ctrl.conn_state_flags,
			.rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
			.rx_conn_state_flags =
				*(u8 *)&conn->local_rx_ctrl.conn_state_flags,

			.tx_prep.wrap = conn->tx_curs_prep.wrap,
			.tx_prep.count = conn->tx_curs_prep.count,
			.tx_sent.wrap = conn->tx_curs_sent.wrap,
			.tx_sent.count = conn->tx_curs_sent.count,
			.tx_fin.wrap = conn->tx_curs_fin.wrap,
			.tx_fin.count = conn->tx_curs_fin.count,
		};

		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
			goto errout;
	}

	if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_diag_lgrinfo linfo = {
			.role = smc->conn.lgr->role,
			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
			.lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
		};

		memcpy(linfo.lnk[0].ibname,
		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
		       sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
		smc_gid_be16_convert(linfo.lnk[0].gid,
				     smc->conn.lgr->lnk[0].gid);
		smc_gid_be16_convert(linfo.lnk[0].peer_gid,
				     smc->conn.lgr->lnk[0].peer_gid);

		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
			goto errout;
	}
	if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_connection *conn = &smc->conn;
		struct smcd_diag_dmbinfo dinfo = {
			.linkid = *((u32 *)conn->lgr->id),
			.peer_gid = conn->lgr->peer_gid,
			.my_gid = conn->lgr->smcd->local_gid,
			.token = conn->rmb_desc->token,
			.peer_token = conn->peer_token
		};

		if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
			goto errout;
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *bc = NULL;
	struct hlist_head *head;
	struct sock *sk;
	int rc = 0;

	read_lock(&prot->h.smc_hash->lock);
	head = &prot->h.smc_hash->ht;
	if (hlist_empty(head))
		goto out;

	sk_for_each(sk, head) {
		if (!net_eq(sock_net(sk), net))
			continue;
		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
		if (rc)
			break;
	}

out:
	read_unlock(&prot->h.smc_hash->lock);
	return rc;
}

static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int rc = 0;

	rc = smc_diag_dump_proto(&smc_proto, skb, cb);
	if (!rc)
		rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
	return rc;
}

static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	struct net *net = sock_net(skb->sk);

	if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
	    h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = smc_diag_dump,
			.min_dump_alloc = SKB_WITH_OVERHEAD(32768),
		};

		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	}
	return 0;
}

static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};

static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}

static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}

module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);
Example #3
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
                      struct user_namespace *user_ns,
                      u32 portid, u32 seq, u16 nlmsg_flags,
                      const struct nlmsghdr *unlh,
                      bool net_admin)
{
    const struct tcp_congestion_ops *ca_ops;
    const struct inet_diag_handler *handler;
    int ext = req->idiag_ext;
    struct inet_diag_msg *r;
    struct nlmsghdr  *nlh;
    struct nlattr *attr;
    void *info = NULL;

    handler = inet_diag_table[req->sdiag_protocol];
    BUG_ON(!handler);

    nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                    nlmsg_flags);
    if (!nlh)
        return -EMSGSIZE;

    r = nlmsg_data(nlh);
    BUG_ON(!sk_fullsock(sk));

    inet_diag_msg_common_fill(r, sk);
    r->idiag_state = sk->sk_state;
    r->idiag_timer = 0;
    r->idiag_retrans = 0;

    if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
        goto errout;

    if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
        struct inet_diag_meminfo minfo = {
            .idiag_rmem = sk_rmem_alloc_get(sk),
            .idiag_wmem = sk->sk_wmem_queued,
            .idiag_fmem = sk->sk_forward_alloc,
            .idiag_tmem = sk_wmem_alloc_get(sk),
        };

        if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
            goto errout;
    }

    if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
        if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
            goto errout;

    if (!icsk) {
        handler->idiag_get_info(sk, r, NULL);
        goto out;
    }

    if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
        r->idiag_timer = 1;
        r->idiag_retrans = icsk->icsk_retransmits;
        r->idiag_expires =
            jiffies_to_msecs(icsk->icsk_timeout - jiffies);
    } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
        r->idiag_timer = 4;
        r->idiag_retrans = icsk->icsk_probes_out;
        r->idiag_expires =
            jiffies_to_msecs(icsk->icsk_timeout - jiffies);
    } else if (timer_pending(&sk->sk_timer)) {
        r->idiag_timer = 2;
        r->idiag_retrans = icsk->icsk_probes_out;
        r->idiag_expires =
            jiffies_to_msecs(sk->sk_timer.expires - jiffies);
    } else {
        r->idiag_timer = 0;
        r->idiag_expires = 0;
    }

    if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
        attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
                                 handler->idiag_info_size,
                                 INET_DIAG_PAD);
        if (!attr)
            goto errout;

        info = nla_data(attr);
    }

    if (ext & (1 << (INET_DIAG_CONG - 1))) {
        int err = 0;

        rcu_read_lock();
        ca_ops = READ_ONCE(icsk->icsk_ca_ops);
        if (ca_ops)
            err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
        rcu_read_unlock();
        if (err < 0)
            goto errout;
    }

    handler->idiag_get_info(sk, r, info);

    if (sk->sk_state < TCP_TIME_WAIT) {
        union tcp_cc_info info;
        size_t sz = 0;
        int attr;

        rcu_read_lock();
        ca_ops = READ_ONCE(icsk->icsk_ca_ops);
        if (ca_ops && ca_ops->get_info)
            sz = ca_ops->get_info(sk, ext, &attr, &info);
        rcu_read_unlock();
        if (sz && nla_put(skb, attr, sz, &info) < 0)
            goto errout;
    }

out:
    nlmsg_end(skb, nlh);
    return 0;

errout:
    nlmsg_cancel(skb, nlh);
    return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
                              struct sk_buff *skb,
                              const struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,
                              u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh,
                              bool net_admin)
{
    return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
                             portid, seq, nlmsg_flags, unlh, net_admin);
}

static int inet_twsk_diag_fill(struct sock *sk,
                               struct sk_buff *skb,
                               u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
{
    struct inet_timewait_sock *tw = inet_twsk(sk);
    struct inet_diag_msg *r;
    struct nlmsghdr *nlh;
    long tmo;

    nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                    nlmsg_flags);
    if (!nlh)
        return -EMSGSIZE;

    r = nlmsg_data(nlh);
    BUG_ON(tw->tw_state != TCP_TIME_WAIT);

    tmo = tw->tw_timer.expires - jiffies;
    if (tmo < 0)
        tmo = 0;

    inet_diag_msg_common_fill(r, sk);
    r->idiag_retrans      = 0;

    r->idiag_state	      = tw->tw_substate;
    r->idiag_timer	      = 3;
    r->idiag_expires      = jiffies_to_msecs(tmo);
    r->idiag_rqueue	      = 0;
    r->idiag_wqueue	      = 0;
    r->idiag_uid	      = 0;
    r->idiag_inode	      = 0;

    nlmsg_end(skb, nlh);
    return 0;
}

static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
                              u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh, bool net_admin)
{
    struct request_sock *reqsk = inet_reqsk(sk);
    struct inet_diag_msg *r;
    struct nlmsghdr *nlh;
    long tmo;

    nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                    nlmsg_flags);
    if (!nlh)
        return -EMSGSIZE;

    r = nlmsg_data(nlh);
    inet_diag_msg_common_fill(r, sk);
    r->idiag_state = TCP_SYN_RECV;
    r->idiag_timer = 1;
    r->idiag_retrans = reqsk->num_retrans;

    BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
                 offsetof(struct sock, sk_cookie));

    tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
    r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
    r->idiag_rqueue	= 0;
    r->idiag_wqueue	= 0;
    r->idiag_uid	= 0;
    r->idiag_inode	= 0;

    if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
                                 inet_rsk(reqsk)->ir_mark))
        return -EMSGSIZE;

    nlmsg_end(skb, nlh);
    return 0;
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        const struct inet_diag_req_v2 *r,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh, bool net_admin)
{
    if (sk->sk_state == TCP_TIME_WAIT)
        return inet_twsk_diag_fill(sk, skb, portid, seq,
                                   nlmsg_flags, unlh);

    if (sk->sk_state == TCP_NEW_SYN_RECV)
        return inet_req_diag_fill(sk, skb, portid, seq,
                                  nlmsg_flags, unlh, net_admin);

    return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
                              nlmsg_flags, unlh, net_admin);
}

struct sock *inet_diag_find_one_icsk(struct net *net,
                                     struct inet_hashinfo *hashinfo,
                                     const struct inet_diag_req_v2 *req)
{
    struct sock *sk;

    rcu_read_lock();
    if (req->sdiag_family == AF_INET)
        sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
                         req->id.idiag_dport, req->id.idiag_src[0],
                         req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
    else if (req->sdiag_family == AF_INET6) {
        if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
                ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
            sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
                             req->id.idiag_dport, req->id.idiag_src[3],
                             req->id.idiag_sport, req->id.idiag_if);
        else
            sk = inet6_lookup(net, hashinfo, NULL, 0,
                              (struct in6_addr *)req->id.idiag_dst,
                              req->id.idiag_dport,
                              (struct in6_addr *)req->id.idiag_src,
                              req->id.idiag_sport,
                              req->id.idiag_if);
    }
#endif
    else {
        rcu_read_unlock();
        return ERR_PTR(-EINVAL);
    }
    rcu_read_unlock();
    if (!sk)
        return ERR_PTR(-ENOENT);

    if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
        sock_gen_put(sk);
        return ERR_PTR(-ENOENT);
    }

    return sk;
}
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
                            struct sk_buff *in_skb,
                            const struct nlmsghdr *nlh,
                            const struct inet_diag_req_v2 *req)
{
    struct net *net = sock_net(in_skb->sk);
    struct sk_buff *rep;
    struct sock *sk;
    int err;

    sk = inet_diag_find_one_icsk(net, hashinfo, req);
    if (IS_ERR(sk))
        return PTR_ERR(sk);

    rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
    if (!rep) {
        err = -ENOMEM;
        goto out;
    }

    err = sk_diag_fill(sk, rep, req,
                       sk_user_ns(NETLINK_CB(in_skb).sk),
                       NETLINK_CB(in_skb).portid,
                       nlh->nlmsg_seq, 0, nlh,
                       netlink_net_capable(in_skb, CAP_NET_ADMIN));
    if (err < 0) {
        WARN_ON(err == -EMSGSIZE);
        nlmsg_free(rep);
        goto out;
    }
    err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                          MSG_DONTWAIT);
    if (err > 0)
        err = 0;

out:
    if (sk)
        sock_gen_put(sk);

    return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
                               const struct inet_diag_req_v2 *req)
{
    const struct inet_diag_handler *handler;
    int err;

    handler = inet_diag_lock_handler(req->sdiag_protocol);
    if (IS_ERR(handler))
        err = PTR_ERR(handler);
    else if (cmd == SOCK_DIAG_BY_FAMILY)
        err = handler->dump_one(in_skb, nlh, req);
    else if (cmd == SOCK_DESTROY && handler->destroy)
        err = handler->destroy(in_skb, req);
    else
        err = -EOPNOTSUPP;
    inet_diag_unlock_handler(handler);

    return err;
}

static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
    int words = bits >> 5;

    bits &= 0x1f;

    if (words) {
        if (memcmp(a1, a2, words << 2))
            return 0;
    }
    if (bits) {
        __be32 w1, w2;
        __be32 mask;

        w1 = a1[words];
        w2 = a2[words];

        mask = htonl((0xffffffff) << (32 - bits));

        if ((w1 ^ w2) & mask)
            return 0;
    }

    return 1;
}
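
To make the bit arithmetic concrete, here is a minimal standalone sketch of
bitstring_match() in action (the addresses and the /12 prefix are illustrative,
and the helper is assumed to be compiled into the same translation unit, since
it is static):

#include <arpa/inet.h>
#include <linux/types.h>
#include <stdio.h>

/* Compare two addresses against 10.0.0.0 under a /12 prefix:
 * only the first 12 bits take part in the comparison. */
int main(void)
{
	__be32 base = htonl(0x0A000000);	/* 10.0.0.0 */
	__be32 in   = htonl(0x0A0FFF01);	/* 10.15.255.1: same /12 */
	__be32 out  = htonl(0x0A100000);	/* 10.16.0.0: bit 12 differs */

	printf("%d\n", bitstring_match(&in, &base, 12));	/* prints 1 */
	printf("%d\n", bitstring_match(&out, &base, 12));	/* prints 0 */
	return 0;
}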
Example #4
extern int nla_put_string(struct nlmsg *nlmsg, int attr, const char *string)
{
	return nla_put(nlmsg, attr, string, strlen(string) + 1);
}
Example #5
extern int nla_put_u16(struct nlmsg *nlmsg, int attr, unsigned short value)
{
	return nla_put(nlmsg, attr, &value, sizeof(value));
}
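
Both helpers above take a struct nlmsg rather than a struct sk_buff, which
points at a userspace netlink wrapper (LXC-style) rather than the kernel API.
A hedged usage sketch follows; nlmsg_alloc() and the MY_ATTR_* ids are
illustrative placeholders, not taken from the source:

/* Hypothetical caller: pack an interface name and an MTU into one request. */
static int fill_request(struct nlmsg **out)
{
	struct nlmsg *msg = nlmsg_alloc(1024);

	if (!msg)
		return -ENOMEM;
	if (nla_put_string(msg, MY_ATTR_IFNAME, "veth0") ||
	    nla_put_u16(msg, MY_ATTR_MTU, 1500))
		return -EINVAL;	/* a real caller would free msg here */

	*out = msg;
	return 0;
}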
Example #6
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index   = p->tcf_index,
		.action  = p->tcf_action,
		.refcnt  = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops, CSUM_TAB_MASK);
}

static void __net_exit csum_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);

Example #7
static int wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
	struct wireless_dev *wdev, const void  *data, int len)
{
	int err = 0;
	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
	gscan_results_cache_t *results, *iter;
	uint32 reply_len, complete = 0, num_results_iter;
	int32 mem_needed;
	wifi_gscan_result_t *ptr;
	uint16 num_scan_ids, num_results;
	struct sk_buff *skb;
	struct nlattr *scan_hdr;

	dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg));
	dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
	results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
	             DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);

	if (!results) {
		WL_ERR(("No results to send %d\n", err));
		err =  rtw_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
		        results, 0);

		if (unlikely(err))
			WL_ERR(("Vendor Command reply failed ret:%d \n", err));
		dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
		return err;
	}
	num_scan_ids = reply_len & 0xFFFF;
	num_results = (reply_len & 0xFFFF0000) >> 16;
	mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
	             (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
	             VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;

	if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
		mem_needed = (int32)NLMSG_DEFAULT_SIZE;
		complete = 0;
	} else {
		complete = 1;
	}

	WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed,
		(int)NLMSG_DEFAULT_SIZE));
	/* Alloc the SKB for vendor_event */
	skb = rtw_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
	if (unlikely(!skb)) {
		WL_ERR(("skb alloc failed"));
		dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
		return -ENOMEM;
	}
	iter = results;

	nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, complete);

	mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);

	while (iter && ((mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)  > 0)) {
		scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
		nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
		nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
		num_results_iter =
		    (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t);

		if ((iter->tot_count - iter->tot_consumed) < num_results_iter)
			num_results_iter = iter->tot_count - iter->tot_consumed;

		nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
		if (num_results_iter) {
			ptr = &iter->results[iter->tot_consumed];
			iter->tot_consumed += num_results_iter;
			nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
			 num_results_iter * sizeof(wifi_gscan_result_t), ptr);
		}
		nla_nest_end(skb, scan_hdr);
		mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
		    (num_results_iter * sizeof(wifi_gscan_result_t));
		iter = iter->next;
	}

	dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
	dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));

	return rtw_cfg80211_vendor_cmd_reply(skb);
}
Example #8
static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
				    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_connmark_info *ci = to_connmark(a);

	struct tc_connmark opt = {
		.index   = ci->tcf_index,
		.refcnt  = ci->tcf_refcnt - ref,
		.bindcnt = ci->tcf_bindcnt - bind,
		.action  = ci->tcf_action,
		.zone   = ci->zone,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &ci->tcf_tm);
	if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
			  TCA_CONNMARK_PAD))
		goto nla_put_failure;

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_connmark_walker(struct net *net, struct sk_buff *skb,
			       struct netlink_callback *cb, int type,
			       const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, connmark_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, connmark_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_connmark_ops = {
	.kind		=	"connmark",
	.type		=	TCA_ACT_CONNMARK,
	.owner		=	THIS_MODULE,
	.act		=	tcf_connmark,
	.dump		=	tcf_connmark_dump,
	.init		=	tcf_connmark_init,
	.walk		=	tcf_connmark_walker,
	.lookup		=	tcf_connmark_search,
	.size		=	sizeof(struct tcf_connmark_info),
};

static __net_init int connmark_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, connmark_net_id);

	return tc_action_net_init(tn, &act_connmark_ops, CONNMARK_TAB_MASK);
}

static void __net_exit connmark_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, connmark_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations connmark_net_ops = {
	.init = connmark_init_net,
	.exit = connmark_exit_net,
	.id   = &connmark_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init connmark_init_module(void)
{
	return tcf_register_action(&act_connmark_ops, &connmark_net_ops);
}

static void __exit connmark_cleanup_module(void)
{
	tcf_unregister_action(&act_connmark_ops, &connmark_net_ops);
}

module_init(connmark_init_module);
module_exit(connmark_cleanup_module);
MODULE_AUTHOR("Felix Fietkau <*****@*****.**>");
MODULE_DESCRIPTION("Connection tracking mark restoring");
MODULE_LICENSE("GPL");
Example #9
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_fifo_qopt opt = { .limit = sch->limit };

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);

struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);

struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};

/* Pass size change message down to embedded FIFO */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla);
		kfree(nla);
	}
	return ret;
}
EXPORT_SYMBOL(fifo_set_limit);

struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit)
{
	struct Qdisc *q;
	int err = -ENOMEM;

	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
	if (q) {
		err = fifo_set_limit(q, limit);
		if (err < 0) {
			qdisc_destroy(q);
			q = NULL;
		}
	}

	return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);
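
fifo_set_limit() above resizes an embedded FIFO by synthesizing an in-memory
netlink attribute and handing it straight to the child's ->change() hook, so
no real netlink message is ever built. Below is a sketch of typical
parent-qdisc usage of fifo_create_dflt(); the surrounding TBF-style context is
assumed, not part of the source:

/* Illustrative parent-qdisc helper: "limit" stands for a queue limit carried
 * in the hypothetical parent's private data. */
static int attach_default_fifo(struct Qdisc *sch, unsigned int limit)
{
	struct Qdisc *child;

	child = fifo_create_dflt(sch, &bfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* ...a real parent would now install child in its private data... */
	return 0;
}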
Example #10
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
	void *data, int len)
#else
int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
{
	struct sk_buff *reply;
	struct bcm_cfg80211 *cfg;
	dhd_pub_t *dhd;
	struct bcm_nlmsg_hdr *nlioc = data;
	dhd_ioctl_t ioc = { 0 };
	int err = 0;
	void *buf = NULL, *cur;
	u16 buflen;
	u16 maxmsglen = PAGE_SIZE - 0x100;
	bool newbuf = false;
	int8 index = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	struct net_device *ndev = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */

	WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
	cfg = wiphy_priv(wiphy);
	dhd = cfg->pub;

	DHD_OS_WAKE_LOCK(dhd);

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->hang_was_sent) {
		WL_ERR(("HANG was sent up earlier\n"));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
		DHD_OS_WAKE_UNLOCK(dhd);
		return OSL_ERROR(BCME_DONGLE_DOWN);
	}

	len -= sizeof(struct bcm_nlmsg_hdr);

	if (nlioc->len > 0) {
		if (nlioc->len <= len) {
			buf = (void *)nlioc + nlioc->offset;
			*(char *)(buf + nlioc->len) = '\0';
		} else {
			if (nlioc->len > DHD_IOCTL_MAXLEN)
				nlioc->len = DHD_IOCTL_MAXLEN;
			buf = vzalloc(nlioc->len);
			if (!buf) {
				err = -ENOMEM;
				goto done;	/* drop the wake lock taken above */
			}
			newbuf = true;
			memcpy(buf, (void *)nlioc + nlioc->offset, len);
			*(char *)(buf + len) = '\0';
		}
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	ndev = wdev_to_wlc_ndev(wdev, cfg);
	index = dhd_net2idx(dhd->info, ndev);
	if (index == DHD_BAD_IF) {
		WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
		err = BCME_ERROR;
		goto done;	/* free buf (if any) and drop the wake lock */
	}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */

	ioc.cmd = nlioc->cmd;
	ioc.len = nlioc->len;
	ioc.set = nlioc->set;
	ioc.driver = nlioc->magic;
	err = dhd_ioctl_process(dhd, index, &ioc, buf);
	if (err) {
		WL_TRACE(("dhd_ioctl_process return err %d\n", err));
		err = OSL_ERROR(err);
		goto done;
	}

	cur = buf;
	while (nlioc->len > 0) {
		buflen = nlioc->len > maxmsglen ? maxmsglen : nlioc->len;
		nlioc->len -= buflen;
		reply = cfg80211_testmode_alloc_reply_skb(wiphy, buflen+4);
		if (!reply) {
			WL_ERR(("Failed to allocate reply msg\n"));
			err = -ENOMEM;
			break;
		}

		if (nla_put(reply, BCM_NLATTR_DATA, buflen, cur) ||
			nla_put_u16(reply, BCM_NLATTR_LEN, buflen)) {
			kfree_skb(reply);
			err = -ENOBUFS;
			break;
		}

		do {
			err = cfg80211_testmode_reply(reply);
		} while (err == -EAGAIN);
		if (err) {
			WL_ERR(("testmode reply failed:%d\n", err));
			break;
		}
		cur += buflen;
	}

done:
	if (newbuf)
		vfree(buf);
	DHD_OS_WAKE_UNLOCK(dhd);
	return err;
}
Example #11
static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_sample *s = to_sample(a);
	struct tc_sample opt = {
		.index      = s->tcf_index,
		.action     = s->tcf_action,
		.refcnt     = s->tcf_refcnt - ref,
		.bindcnt    = s->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &s->tcf_tm);
	if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
		goto nla_put_failure;

	if (s->truncate)
		if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
			goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_sample_ops = {
	.kind	  = "sample",
	.type	  = TCA_ACT_SAMPLE,
	.owner	  = THIS_MODULE,
	.act	  = tcf_sample_act,
	.dump	  = tcf_sample_dump,
	.init	  = tcf_sample_init,
	.cleanup  = tcf_sample_cleanup,
	.walk	  = tcf_sample_walker,
	.lookup	  = tcf_sample_search,
	.size	  = sizeof(struct tcf_sample),
};

static __net_init int sample_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);

	return tc_action_net_init(tn, &act_sample_ops);
}

static void __net_exit sample_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, sample_net_id);
}

static struct pernet_operations sample_net_ops = {
	.init = sample_init_net,
	.exit_batch = sample_exit_net,
	.id   = &sample_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init sample_init_module(void)
{
	return tcf_register_action(&act_sample_ops, &sample_net_ops);
}

static void __exit sample_cleanup_module(void)
{
	tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}

module_init(sample_init_module);
module_exit(sample_cleanup_module);

MODULE_AUTHOR("Yotam Gigi <*****@*****.**>");
MODULE_DESCRIPTION("Packet sampling action");
MODULE_LICENSE("GPL v2");
static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gact *gact = a->priv;
	struct tc_gact opt = {
		.index   = gact->tcf_index,
		.refcnt  = gact->tcf_refcnt - ref,
		.bindcnt = gact->tcf_bindcnt - bind,
		.action  = gact->tcf_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
#ifdef CONFIG_GACT_PROB
	if (gact->tcfg_ptype) {
		struct tc_gact_p p_opt = {
			.paction = gact->tcfg_paction,
			.pval    = gact->tcfg_pval,
			.ptype   = gact->tcfg_ptype,
		};

		if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
			goto nla_put_failure;
	}
#endif
	t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
	if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_gact_ops = {
	.kind		=	"gact",
	.hinfo		=	&gact_hash_info,
	.type		=	TCA_ACT_GACT,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_gact,
	.dump		=	tcf_gact_dump,
	.cleanup	=	tcf_gact_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_gact_init,
	.walk		=	tcf_generic_walker
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
MODULE_DESCRIPTION("Generic Classifier actions");
MODULE_LICENSE("GPL");

static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
	pr_info("GACT probability on\n");
#else
	pr_info("GACT probability NOT on\n");
#endif
	return tcf_register_action(&act_gact_ops);
}

static void __exit gact_cleanup_module(void)
{
	tcf_unregister_action(&act_gact_ops);
}

module_init(gact_init_module);
module_exit(gact_cleanup_module);
Example #13
static int handle_bitrates(struct nl80211_state *state,
			   struct nl_cb *cb,
			   struct nl_msg *msg,
			   int argc, char **argv)
{
	struct nlattr *nl_rates, *nl_band;
	int i;
	bool have_legacy_24 = false, have_legacy_5 = false;
	uint8_t legacy_24[32], legacy_5[32];
	int n_legacy_24 = 0, n_legacy_5 = 0;
	uint8_t *legacy = NULL;
	int *n_legacy = NULL;
	bool have_mcs_24 = false, have_mcs_5 = false;
#ifdef NL80211_TXRATE_MCS
	uint8_t mcs_24[77], mcs_5[77];
	int n_mcs_24 = 0, n_mcs_5 = 0;
	uint8_t *mcs = NULL;
	int *n_mcs = NULL;
#endif
	enum {
		S_NONE,
		S_LEGACY,
		S_MCS,
	} parser_state = S_NONE;

	for (i = 0; i < argc; i++) {
		char *end;
		double tmpd;
#ifdef NL80211_TXRATE_MCS
		long tmpl;
#endif

		if (strcmp(argv[i], "legacy-2.4") == 0) {
			if (have_legacy_24)
				return 1;
			parser_state = S_LEGACY;
			legacy = legacy_24;
			n_legacy = &n_legacy_24;
			have_legacy_24 = true;
		} else if (strcmp(argv[i], "legacy-5") == 0) {
			if (have_legacy_5)
				return 1;
			parser_state = S_LEGACY;
			legacy = legacy_5;
			n_legacy = &n_legacy_5;
			have_legacy_5 = true;
		}
#ifdef NL80211_TXRATE_MCS
		else if (strcmp(argv[i], "mcs-2.4") == 0) {
			if (have_mcs_24)
				return 1;
			parser_state = S_MCS;
			mcs = mcs_24;
			n_mcs = &n_mcs_24;
			have_mcs_24 = true;
		} else if (strcmp(argv[i], "mcs-5") == 0) {
			if (have_mcs_5)
				return 1;
			parser_state = S_MCS;
			mcs = mcs_5;
			n_mcs = &n_mcs_5;
			have_mcs_5 = true;
		}
#endif
		else switch (parser_state) {
		case S_LEGACY:
			tmpd = strtod(argv[i], &end);
			if (*end != '\0')
				return 1;
			if (tmpd < 1 || tmpd > 255 * 2)
				return 1;
			legacy[(*n_legacy)++] = tmpd * 2;
			break;
		case S_MCS:
#ifdef NL80211_TXRATE_MCS
			tmpl = strtol(argv[i], &end, 0);
			if (*end != '\0')
				return 1;
			if (tmpl < 0 || tmpl > 255)
				return 1;
			mcs[(*n_mcs)++] = tmpl;
			break;
#endif
		default:
			return 1;
		}
	}

	nl_rates = nla_nest_start(msg, NL80211_ATTR_TX_RATES);
	if (!nl_rates)
		goto nla_put_failure;

	if (have_legacy_24 || have_mcs_24) {
		nl_band = nla_nest_start(msg, NL80211_BAND_2GHZ);
		if (!nl_band)
			goto nla_put_failure;
		if (have_legacy_24)
			nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_24, legacy_24);
#ifdef NL80211_TXRATE_MCS
		if (have_mcs_24)
			nla_put(msg, NL80211_TXRATE_MCS, n_mcs_24, mcs_24);
#endif
		nla_nest_end(msg, nl_band);
	}

	if (have_legacy_5 || have_mcs_5) {
		nl_band = nla_nest_start(msg, NL80211_BAND_5GHZ);
		if (!nl_band)
			goto nla_put_failure;
		if (have_legacy_5)
			nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_5, legacy_5);
#ifdef NL80211_TXRATE_MCS
		if (have_mcs_5)
			nla_put(msg, NL80211_TXRATE_MCS, n_mcs_5, mcs_5);
#endif
		nla_nest_end(msg, nl_band);
	}

	nla_nest_end(msg, nl_rates);

	return 0;
 nla_put_failure:
	return -ENOBUFS;
}
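
This parser sits behind iw's "set bitrates" subcommand: legacy rates are given
in Mbps and stored in half-Mbps units (hence tmpd * 2), while MCS entries are
raw indices. A typical invocation, with illustrative rate values:

	iw phy phy0 set bitrates legacy-2.4 5.5 11 mcs-5 7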
Example #14
s32
wl_genl_send_msg(
	struct net_device *ndev,
	u32 event_type,
	u8 *buf,
	u16 len,
	u8 *subhdr,
	u16 subhdr_len)
{
	int ret = 0;
	struct sk_buff *skb;
	void *msg;
	u32 attr_type = 0;
	bcm_event_hdr_t *hdr = NULL;
	int mcast = 1; /* sent as multicast by default */
	int pid = 0;
	u8 *ptr = NULL, *p = NULL;
	u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;


	WL_DBG(("Enter \n"));

	/* Decide between STRING event and Data event */
	if (event_type == 0)
		attr_type = BCM_GENL_ATTR_STRING;
	else
		attr_type = BCM_GENL_ATTR_MSG;

	skb = genlmsg_new(NLMSG_GOODSIZE, kflags);
	if (skb == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	msg = genlmsg_put(skb, 0, 0, &wl_genl_family, 0, BCM_GENL_CMD_MSG);
	if (msg == NULL) {
		ret = -ENOMEM;
		goto out;
	}


	if (attr_type == BCM_GENL_ATTR_STRING) {
		/* Add a BCM_GENL_MSG attribute. Since it is specified as a string.
		 * make sure it is null terminated
		 */
		if (subhdr || subhdr_len) {
			WL_ERR(("No sub hdr support for the ATTR STRING type \n"));
			ret =  -EINVAL;
			goto out;
		}

		ret = nla_put_string(skb, BCM_GENL_ATTR_STRING, buf);
		if (ret != 0) {
			WL_ERR(("nla_put_string failed\n"));
			goto out;
		}
	} else {
		/* ATTR_MSG */

		/* Create a single buffer for all */
		p = ptr = kzalloc(tot_len, kflags);
		if (!ptr) {
			ret = -ENOMEM;
			WL_ERR(("ENOMEM!!\n"));
			goto out;
		}

		/* Include the bcm event header */
		hdr = (bcm_event_hdr_t *)ptr;
		hdr->event_type = wl_event_to_bcm_event(event_type);
		hdr->len = len + subhdr_len;
		ptr += sizeof(bcm_event_hdr_t);

		/* Copy subhdr (if any) */
		if (subhdr && subhdr_len) {
			memcpy(ptr, subhdr, subhdr_len);
			ptr += subhdr_len;
		}

		/* Copy the data */
		if (buf && len) {
			memcpy(ptr, buf, len);
		}

		ret = nla_put(skb, BCM_GENL_ATTR_MSG, tot_len, p);
		if (ret != 0) {
			WL_ERR(("nla_put_string failed\n"));
			goto out;
		}
	}

	if (mcast) {
		int err = 0;
		/* finalize the message */
		genlmsg_end(skb, msg);
		/* NETLINK_CB(skb).dst_group = 1; */
		if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0)
			WL_ERR(("genlmsg_multicast for attr(%d) failed. Error:%d \n",
				attr_type, err));
		else
			WL_DBG(("Multicast msg sent successfully. attr_type:%d len:%d \n",
				attr_type, tot_len));
	} else {
		NETLINK_CB(skb).dst_group = 0; /* Not in multicast group */

		/* finalize the message */
		genlmsg_end(skb, msg);

		/* send the message back */
		if (genlmsg_unicast(&init_net, skb, pid) < 0)
			WL_ERR(("genlmsg_unicast failed\n"));
	}

out:
	if (p)
		kfree(p);
	if (ret)
		nlmsg_free(skb);

	return ret;
}
Example #15
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = to_tcf_nat(a);
	struct tc_nat opt = {
		.index    = p->tcf_index,
		.refcnt   = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	opt.old_addr = p->old_addr;
	opt.new_addr = p->new_addr;
	opt.mask = p->mask;
	opt.flags = p->flags;
	opt.action = p->tcf_action;

	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_nat_ops = {
	.kind		=	"nat",
	.id		=	TCA_ID_NAT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_nat_act,
	.dump		=	tcf_nat_dump,
	.init		=	tcf_nat_init,
	.walk		=	tcf_nat_walker,
	.lookup		=	tcf_nat_search,
	.size		=	sizeof(struct tcf_nat),
};

static __net_init int nat_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);

	return tc_action_net_init(tn, &act_nat_ops);
}

static void __net_exit nat_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, nat_net_id);
}

static struct pernet_operations nat_net_ops = {
	.init = nat_init_net,
	.exit_batch = nat_exit_net,
	.id   = &nat_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops, &nat_net_ops);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops, &nat_net_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);

Example #16
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};


	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}

Example #17
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = a->priv;
	struct tc_vlan opt = {
		.index    = v->tcf_index,
		.refcnt   = v->tcf_refcnt - ref,
		.bindcnt  = v->tcf_bindcnt - bind,
		.action   = v->tcf_action,
		.v_action = v->tcfv_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, v->tcfv_push_proto)))
		goto nla_put_failure;

	t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
	if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}

static int tcf_vlan_search(struct net *net, struct tc_action *a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_vlan_ops = {
	.kind		=	"vlan",
	.type		=	TCA_ACT_VLAN,
	.owner		=	THIS_MODULE,
	.act		=	tcf_vlan,
	.dump		=	tcf_vlan_dump,
	.init		=	tcf_vlan_init,
	.walk		=	tcf_vlan_walker,
	.lookup		=	tcf_vlan_search,
};

static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tc_action_net_init(tn, &act_vlan_ops, VLAN_TAB_MASK);
}

static void __net_exit vlan_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <*****@*****.**>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");
Example #18
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct can_priv *priv = netdev_priv(dev);
	struct can_ctrlmode cm = {.flags = priv->ctrlmode};
	struct can_berr_counter bec;
	enum can_state state = priv->state;

	if (priv->do_get_state)
		priv->do_get_state(dev, &state);

	if ((priv->bittiming.bitrate &&
	     nla_put(skb, IFLA_CAN_BITTIMING,
		     sizeof(priv->bittiming), &priv->bittiming)) ||

	    (priv->bittiming_const &&
	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
		     sizeof(*priv->bittiming_const), priv->bittiming_const)) ||

	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
	    nla_put_u32(skb, IFLA_CAN_STATE, state) ||
	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||

	    (priv->do_get_berr_counter &&
	     !priv->do_get_berr_counter(dev, &bec) &&
	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||

	    (priv->data_bittiming.bitrate &&
	     nla_put(skb, IFLA_CAN_DATA_BITTIMING,
		     sizeof(priv->data_bittiming), &priv->data_bittiming)) ||

	    (priv->data_bittiming_const &&
	     nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
		     sizeof(*priv->data_bittiming_const),
		     priv->data_bittiming_const)))
		return -EMSGSIZE;

	return 0;
}

static size_t can_get_xstats_size(const struct net_device *dev)
{
	return sizeof(struct can_device_stats);
}

static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
{
	struct can_priv *priv = netdev_priv(dev);

	if (nla_put(skb, IFLA_INFO_XSTATS,
		    sizeof(priv->can_stats), &priv->can_stats))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int can_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	return -EOPNOTSUPP;
}

static struct rtnl_link_ops can_link_ops __read_mostly = {
	.kind		= "can",
	.maxtype	= IFLA_CAN_MAX,
	.policy		= can_policy,
	.setup		= can_setup,
	.newlink	= can_newlink,
	.changelink	= can_changelink,
	.get_size	= can_get_size,
	.fill_info	= can_fill_info,
	.get_xstats_size = can_get_xstats_size,
	.fill_xstats	= can_fill_xstats,
};

/*
 * Register the CAN network device
 */
int register_candev(struct net_device *dev)
{
	dev->rtnl_link_ops = &can_link_ops;
	return register_netdev(dev);
}
EXPORT_SYMBOL_GPL(register_candev);

/*
 * Unregister the CAN network device
 */
void unregister_candev(struct net_device *dev)
{
	unregister_netdev(dev);
}
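
As a usage note, here is a hedged sketch of the driver-side pairing for these helpers, assuming the alloc_candev()/free_candev() allocators from the same CAN device core; the probe name and echo-skb count are illustrative.

/* Hedged sketch: how a controller driver would typically pair
 * register_candev() with alloc_candev()/free_candev(). */
static int example_can_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_candev(sizeof(struct can_priv), 4);
	if (!dev)
		return -ENOMEM;

	err = register_candev(dev);	/* attaches can_link_ops via rtnl */
	if (err) {
		free_candev(dev);
		return err;
	}
	return 0;
}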
Example #19
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index    = t->tcf_index,
		.refcnt   = t->tcf_refcnt - ref,
		.bindcnt  = t->tcf_bindcnt - bind,
	};
	struct tcf_t tm;

	params = rtnl_dereference(t->params);

	opt.t_action = params->tcft_action;
	opt.action = params->action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_key *key =
			&params->tcft_enc_metadata->u.tun_info.key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_tunnel_key_ops = {
	.kind		=	"tunnel_key",
	.type		=	TCA_ACT_TUNNEL_KEY,
	.owner		=	THIS_MODULE,
	.act		=	tunnel_key_act,
	.dump		=	tunnel_key_dump,
	.init		=	tunnel_key_init,
	.cleanup	=	tunnel_key_release,
	.walk		=	tunnel_key_walker,
	.lookup		=	tunnel_key_search,
	.size		=	sizeof(struct tcf_tunnel_key),
};

static __net_init int tunnel_key_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tc_action_net_init(tn, &act_tunnel_key_ops, TUNNEL_KEY_TAB_MASK);
}

static void __net_exit tunnel_key_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit = tunnel_key_exit_net,
	.id   = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <*****@*****.**>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");
Example #20
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = a->priv;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
		    &d->priority))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
		    sizeof(d->queue_mapping), &d->queue_mapping))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MARK) &&
	    nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
		    &d->mark))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
			      struct netlink_callback *cb, int type,
			      struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}

static int tcf_skbedit_search(struct net *net, struct tc_action *a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.type		=	TCA_ACT_SKBEDIT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit,
	.dump		=	tcf_skbedit_dump,
	.init		=	tcf_skbedit_init,
	.walk		=	tcf_skbedit_walker,
	.lookup		=	tcf_skbedit_search,
};

static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tc_action_net_init(tn, &act_skbedit_ops, SKBEDIT_TAB_MASK);
}

static void __net_exit skbedit_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit = skbedit_exit_net,
	.id   = &skbedit_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <*****@*****.**>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
Example #21
extern int nla_put_buffer(struct nlmsg *nlmsg, int attr,
			  const void *data, size_t size)
{
	return nla_put(nlmsg, attr, data, size);
}
/**
 * virNetDevMacVLanCreate:
 *
 * @ifname: The name the interface is supposed to have; optional parameter
 * @type: The type of device, i.e., "macvtap", "macvlan"
 * @macaddress: The MAC address of the device
 * @srcdev: The name of the 'link' device
 * @macvlan_mode: The macvlan mode to use
 * @retry: Pointer to integer that will be '1' upon return if an interface
 *         with the same name already exists and it is worth to try
 *         again with a different name
 *
 * Create a macvtap device with the given properties.
 *
 * Returns 0 on success, -1 on fatal error.
 */
int
virNetDevMacVLanCreate(const char *ifname,
                       const char *type,
                       const virMacAddrPtr macaddress,
                       const char *srcdev,
                       uint32_t macvlan_mode,
                       int *retry)
{
    int rc = -1;
    struct nlmsghdr *resp;
    struct nlmsgerr *err;
    struct ifinfomsg ifinfo = { .ifi_family = AF_UNSPEC };
    int ifindex;
    unsigned char *recvbuf = NULL;
    unsigned int recvbuflen;
    struct nl_msg *nl_msg;
    struct nlattr *linkinfo, *info_data;

    if (virNetDevGetIndex(srcdev, &ifindex) < 0)
        return -1;

    *retry = 0;

    nl_msg = nlmsg_alloc_simple(RTM_NEWLINK,
                                NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL);
    if (!nl_msg) {
        virReportOOMError();
        return -1;
    }

    if (nlmsg_append(nl_msg,  &ifinfo, sizeof(ifinfo), NLMSG_ALIGNTO) < 0)
        goto buffer_too_small;

    if (nla_put_u32(nl_msg, IFLA_LINK, ifindex) < 0)
        goto buffer_too_small;

    if (nla_put(nl_msg, IFLA_ADDRESS, VIR_MAC_BUFLEN, macaddress) < 0)
        goto buffer_too_small;

    if (ifname &&
        nla_put(nl_msg, IFLA_IFNAME, strlen(ifname)+1, ifname) < 0)
        goto buffer_too_small;

    if (!(linkinfo = nla_nest_start(nl_msg, IFLA_LINKINFO)))
        goto buffer_too_small;

    if (nla_put(nl_msg, IFLA_INFO_KIND, strlen(type), type) < 0)
        goto buffer_too_small;

    if (macvlan_mode > 0) {
        if (!(info_data = nla_nest_start(nl_msg, IFLA_INFO_DATA)))
            goto buffer_too_small;

        if (nla_put(nl_msg, IFLA_MACVLAN_MODE, sizeof(macvlan_mode),
                    &macvlan_mode) < 0)
            goto buffer_too_small;

        nla_nest_end(nl_msg, info_data);
    }

    nla_nest_end(nl_msg, linkinfo);

    if (virNetlinkCommand(nl_msg, &recvbuf, &recvbuflen, 0, 0,
                          NETLINK_ROUTE, 0) < 0) {
        goto cleanup;
    }

    if (recvbuflen < NLMSG_LENGTH(0) || recvbuf == NULL)
        goto malformed_resp;

    resp = (struct nlmsghdr *)recvbuf;

    switch (resp->nlmsg_type) {
    case NLMSG_ERROR:
        err = (struct nlmsgerr *)NLMSG_DATA(resp);
        if (resp->nlmsg_len < NLMSG_LENGTH(sizeof(*err)))
            goto malformed_resp;

        switch (err->error) {

        case 0:
            break;

        case -EEXIST:
            *retry = 1;
            goto cleanup;

        default:
            virReportSystemError(-err->error,
                                 _("error creating %s type of interface attach to %s"),
                                 type, srcdev);
            goto cleanup;
        }
        break;

    case NLMSG_DONE:
        break;

    default:
        goto malformed_resp;
    }

    rc = 0;
cleanup:
    nlmsg_free(nl_msg);
    VIR_FREE(recvbuf);
    return rc;

malformed_resp:
    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("malformed netlink response message"));
    goto cleanup;

buffer_too_small:
    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("allocated netlink buffer is too small"));
    goto cleanup;
}
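
A hedged sketch of the retry loop that the @retry out-parameter is designed for; the caller name, candidate-name scheme, and macvlan mode are illustrative assumptions, not libvirt's actual policy.

/* Illustrative only: probe for a free interface name, retrying on
 * -EEXIST as signalled through *retry. */
static int example_create_with_retry(const virMacAddrPtr mac,
                                     const char *srcdev)
{
    char ifname[IFNAMSIZ];
    int retry = 0;
    size_t i;

    for (i = 0; i < 5; i++) {
        snprintf(ifname, sizeof(ifname), "macvtap%zu", i);
        if (virNetDevMacVLanCreate(ifname, "macvtap", mac, srcdev,
                                   MACVLAN_MODE_VEPA, &retry) == 0)
            return 0;           /* created */
        if (!retry)
            return -1;          /* fatal error, not a name clash */
        /* name already taken: try the next candidate */
    }
    return -1;
}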
Example #23
extern int nla_put_u32(struct nlmsg *nlmsg, int attr, int value)
{
	return nla_put(nlmsg, attr, &value, sizeof(value));
}
/**
 * virNetDevMacVLanDelete:
 *
 * @ifname: Name of the interface
 *
 * Tear down an interface with the given name.
 *
 * Returns 0 on success, -1 on fatal error.
 */
int virNetDevMacVLanDelete(const char *ifname)
{
    int rc = -1;
    struct nlmsghdr *resp;
    struct nlmsgerr *err;
    struct ifinfomsg ifinfo = { .ifi_family = AF_UNSPEC };
    unsigned char *recvbuf = NULL;
    unsigned int recvbuflen;
    struct nl_msg *nl_msg;

    nl_msg = nlmsg_alloc_simple(RTM_DELLINK,
                                NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL);
    if (!nl_msg) {
        virReportOOMError();
        return -1;
    }

    if (nlmsg_append(nl_msg,  &ifinfo, sizeof(ifinfo), NLMSG_ALIGNTO) < 0)
        goto buffer_too_small;

    if (nla_put(nl_msg, IFLA_IFNAME, strlen(ifname)+1, ifname) < 0)
        goto buffer_too_small;

    if (virNetlinkCommand(nl_msg, &recvbuf, &recvbuflen, 0, 0,
                          NETLINK_ROUTE, 0) < 0) {
        goto cleanup;
    }

    if (recvbuflen < NLMSG_LENGTH(0) || recvbuf == NULL)
        goto malformed_resp;

    resp = (struct nlmsghdr *)recvbuf;

    switch (resp->nlmsg_type) {
    case NLMSG_ERROR:
        err = (struct nlmsgerr *)NLMSG_DATA(resp);
        if (resp->nlmsg_len < NLMSG_LENGTH(sizeof(*err)))
            goto malformed_resp;

        if (err->error) {
            virReportSystemError(-err->error,
                                 _("error destroying %s interface"),
                                 ifname);
            goto cleanup;
        }
        break;

    case NLMSG_DONE:
        break;

    default:
        goto malformed_resp;
    }

    rc = 0;
cleanup:
    nlmsg_free(nl_msg);
    VIR_FREE(recvbuf);
    return rc;

malformed_resp:
    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("malformed netlink response message"));
    goto cleanup;

buffer_too_small:
    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("allocated netlink buffer is too small"));
    goto cleanup;
}
Example #25
extern int nla_put_attr(struct nlmsg *nlmsg, int attr)
{
	return nla_put(nlmsg, attr, NULL, 0);
}
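
Taken together, the wrappers in these examples cover the common attribute shapes: an opaque buffer, a fixed-width integer, and a zero-length flag. A hedged composition sketch follows; the function name and the attr_* parameters are assumptions, not part of this API.

/* Illustrative only: combining the wrappers above in one request. */
static int example_fill(struct nlmsg *nlmsg, int attr_link, int attr_name,
			int attr_flag, int ifindex, const char *name)
{
	if (nla_put_u32(nlmsg, attr_link, ifindex) ||
	    nla_put_buffer(nlmsg, attr_name, name, strlen(name) + 1) ||
	    nla_put_attr(nlmsg, attr_flag))	/* presence alone is the payload */
		return -1;
	return 0;
}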
Example #26
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
			struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen,
			const struct nfnl_ct_hook *nfnl_ct,
			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physindev;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physindev = nf_bridge_get_physindev(skb);
			if (physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physoutdev;

			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutdev = nf_bridge_get_physoutdev(skb);
			if (physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk_fullsock(sk)) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

	if (ct && nfnl_ct->build(inst->skb, ct, ctinfo,
				 NFULA_CT, NFULA_CT_INFO) < 0)
		goto nla_put_failure;

	if (data_len) {
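		/* The packet skb may be nonlinear, so build the attribute by
		 * hand and let skb_copy_bits() gather the payload, instead of
		 * handing nla_put() a flat buffer we don't have. */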
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len))
			goto nla_put_failure;

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
Example #27
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = a->priv;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
		    &d->priority))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
		    sizeof(d->queue_mapping), &d->queue_mapping))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MARK) &&
	    nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
		    &d->mark))
		goto nla_put_failure;
	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
	if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.hinfo		=	&skbedit_hash_info,
	.type		=	TCA_ACT_SKBEDIT,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit,
	.dump		=	tcf_skbedit_dump,
	.cleanup	=	tcf_skbedit_cleanup,
	.init		=	tcf_skbedit_init,
};

MODULE_AUTHOR("Alexander Duyck, <*****@*****.**>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
Example #28
TError TNlLink::AddXVlan(const std::string &vlantype,
                         const std::string &master,
                         uint32_t type,
                         const std::string &hw,
                         int mtu) {
    TError error = TError::Success();
    int ret;
    uint32_t masterIdx;
    struct nl_msg *msg;
    struct nlattr *linkinfo, *infodata;
    struct ifinfomsg ifi = { 0 };
    struct ether_addr *ea = nullptr;
    auto Name = GetName();

    if (hw.length()) {
        // FIXME THREADS
        ea = ether_aton(hw.c_str());
        if (!ea)
            return TError(EError::Unknown, "Invalid " + vlantype + " mac address " + hw);
    }

    TNlLink masterLink(Nl, master);
    error = masterLink.Load();
    if (error)
        return error;
    masterIdx = masterLink.GetIndex();

    msg = nlmsg_alloc_simple(RTM_NEWLINK, NLM_F_CREATE);
    if (!msg)
        return TError(EError::Unknown, "Unable to add " + vlantype + ": no memory");

    ret = nlmsg_append(msg, &ifi, sizeof(ifi), NLMSG_ALIGNTO);
    if (ret < 0) {
        error = TError(EError::Unknown, "Unable to add " + vlantype + ": " + nl_geterror(ret));
        goto free_msg;
    }

    /* link configuration */
    ret = nla_put(msg, IFLA_LINK, sizeof(uint32_t), &masterIdx);
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to put IFLA_LINK: ") + nl_geterror(ret));
        goto free_msg;
    }
    ret = nla_put(msg, IFLA_IFNAME, Name.length() + 1, Name.c_str());
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to put IFLA_IFNAME: ") + nl_geterror(ret));
        goto free_msg;
    }

    if (mtu > 0) {
        ret = nla_put(msg, IFLA_MTU, sizeof(int), &mtu);
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_MTU: ") + nl_geterror(ret));
            goto free_msg;
        }
    }

    if (ea) {
        struct nl_addr *addr = nl_addr_build(AF_LLC, ea, ETH_ALEN);
        ret = nla_put(msg, IFLA_ADDRESS, nl_addr_get_len(addr), nl_addr_get_binary_addr(addr));
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_ADDRESS: ") + nl_geterror(ret));
            goto free_msg;
        }
        nl_addr_put(addr);
    }

    /* link type */
    linkinfo = nla_nest_start(msg, IFLA_LINKINFO);
    if (!linkinfo) {
        error = TError(EError::Unknown, "Unable to add " + vlantype + ": can't nest IFLA_LINKINFO");
        goto free_msg;
    }
    ret = nla_put(msg, IFLA_INFO_KIND, vlantype.length() + 1, vlantype.c_str());
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to put IFLA_INFO_KIND: ") + nl_geterror(ret));
        goto free_msg;
    }

    /* xvlan specific */
    infodata = nla_nest_start(msg, IFLA_INFO_DATA);
    if (!infodata) {
        error = TError(EError::Unknown, "Unable to add " + vlantype + ": can't nest IFLA_INFO_DATA");
        goto free_msg;
    }

    if (vlantype == "macvlan") {
        ret = nla_put(msg, IFLA_MACVLAN_MODE, sizeof(uint32_t), &type);
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_MACVLAN_MODE: ") + nl_geterror(ret));
            goto free_msg;
        }
#ifdef IFLA_IPVLAN_MAX
    } else if (vlantype == "ipvlan") {
        uint16_t mode = type;
        ret = nla_put(msg, IFLA_IPVLAN_MODE, sizeof(uint16_t), &mode);
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_IPVLAN_MODE: ") + nl_geterror(ret));
            goto free_msg;
        }
#endif
    }
    nla_nest_end(msg, infodata);
    nla_nest_end(msg, linkinfo);

    L() << "netlink: add " << vlantype << " " << Name << " master " << master
        << " type " << type << " hw " << hw << " mtu " << mtu << std::endl;

    ret = nl_send_sync(GetSock(), msg);
    if (ret)
        return Error(ret, "Cannot add " + vlantype);

    return Load();

free_msg:
    nlmsg_free(msg);
    return error;

}
Example #29
static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
				    u32 seq, int flags, struct net_device *dev)
{
	void *hdr;
	struct wpan_phy *phy;
	struct ieee802154_mlme_ops *ops;
	__le16 short_addr, pan_id;

	pr_debug("%s\n", __func__);

	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
			  IEEE802154_LIST_IFACE);
	if (!hdr)
		goto out;

	ops = ieee802154_mlme_ops(dev);
	phy = dev->ieee802154_ptr->wpan_phy;
	BUG_ON(!phy);
	get_device(&phy->dev);

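	/* Read both addresses under rtnl so they form a consistent pair. */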
	rtnl_lock();
	short_addr = dev->ieee802154_ptr->short_addr;
	pan_id = dev->ieee802154_ptr->pan_id;
	rtnl_unlock();

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
		    dev->dev_addr) ||
	    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
	    nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, pan_id))
		goto nla_put_failure;

	if (ops->get_mac_params) {
		struct ieee802154_mac_params params;

		rtnl_lock();
		ops->get_mac_params(dev, &params);
		rtnl_unlock();

		if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
			       params.transmit_power / 100) ||
		    nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
			       params.cca.mode) ||
		    nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
				params.cca_ed_level / 100) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
			       params.csma_retries) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
			       params.min_be) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE,
			       params.max_be) ||
		    nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES,
			       params.frame_retries))
			goto nla_put_failure;
	}

	wpan_phy_put(phy);
	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	wpan_phy_put(phy);
	genlmsg_cancel(msg, hdr);
out:
	return -EMSGSIZE;
}
Example #30
static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
				struct l2tp_session *session)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;
	struct l2tp_stats stats;
	unsigned int start;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
			session->peer_session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
	    nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
	    (session->mru &&
	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
		goto nla_put_failure;

	if ((session->ifname && session->ifname[0] &&
	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
	    (session->cookie_len &&
	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
		     &session->cookie[0])) ||
	    (session->peer_cookie_len &&
	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
		     &session->peer_cookie[0])) ||
	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
#ifdef CONFIG_XFRM
	    (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
#endif
	    (session->reorder_timeout &&
	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

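	/* Snapshot the counters under the u64_stats seqcount so a concurrent
	 * writer cannot hand us torn 64-bit values on 32-bit hosts. */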
	do {
		start = u64_stats_fetch_begin(&session->stats.syncp);
		stats.tx_packets = session->stats.tx_packets;
		stats.tx_bytes = session->stats.tx_bytes;
		stats.tx_errors = session->stats.tx_errors;
		stats.rx_packets = session->stats.rx_packets;
		stats.rx_bytes = session->stats.rx_bytes;
		stats.rx_errors = session->stats.rx_errors;
		stats.rx_seq_discards = session->stats.rx_seq_discards;
		stats.rx_oos_packets = session->stats.rx_oos_packets;
	} while (u64_stats_fetch_retry(&session->stats.syncp, start));

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
			stats.rx_seq_discards) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
			stats.rx_oos_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return genlmsg_end(skb, hdr);

 nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}