Example #1
static int
tfw_bmb_conn_compl(struct sock *sk)
{
	SsProto *proto = (SsProto *)rcu_dereference_sk_user_data(sk);
	TfwBmbTask *task;
	TfwBmbConn *conn;
	int tail;

	BUG_ON(!proto);

	task = &bmb_task[proto->type / nconns];
	conn = &task->conn[proto->type % nconns];

	BUG_ON(conn->proto.type != proto->type);
	BUG_ON(conn->proto.listener != NULL);
	BUG_ON(conn->proto.hooks != &bmb_hooks);
	BUG_ON(conn->sk && conn->sk != sk);

	tail = atomic_read(&task->conn_rd_tail);
	task->conn_rd[tail] = proto->type % nconns;
	atomic_inc(&task->conn_rd_tail);
	atomic_inc(&task->conn_compl);

	wake_up(&task->conn_wq);

	return 0;
}
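
All of the callbacks in these examples read per-socket state through rcu_dereference_sk_user_data(). A minimal sketch of the matching publisher side is shown below; the helper name and the priv pointer are illustrative and not taken from any of the examples:

/* Illustrative sketch (hypothetical helper): publish per-socket private
 * data so that callbacks like tfw_bmb_conn_compl() above can read it
 * back with rcu_dereference_sk_user_data().
 */
static void example_publish_sk_user_data(struct sock *sk, void *priv)
{
	/* Pairs with rcu_dereference_sk_user_data() in the callbacks. */
	rcu_assign_sk_user_data(sk, priv);
}
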
Example #2
static int
tfw_bmb_conn_error(struct sock *sk)
{
	SsProto *proto = (SsProto *)rcu_dereference_sk_user_data(sk);

	BUG_ON(proto == NULL);
	atomic_inc(&bmb_task[proto->type / nconns].conn_error);

	return __update_conn(sk);
}
Example #3
static int
__update_conn(struct sock *sk)
{
	SsProto *proto = (SsProto *)rcu_dereference_sk_user_data(sk);
	TfwBmbTask *task;
	TfwBmbConn *conn;

	BUG_ON(proto == NULL);

	task = &bmb_task[proto->type / nconns];
	conn = &task->conn[proto->type % nconns];

	BUG_ON(conn->proto.type != proto->type);
	BUG_ON(conn->proto.listener != NULL);
	BUG_ON(conn->proto.hooks != &bmb_hooks);
	BUG_ON(conn->sk && conn->sk != sk);

	wake_up(&task->conn_wq);

	return 0;
}
Example #4
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto error;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);

	if (unlikely(geneveh->ver != GENEVE_VER))
		goto error;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto error;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	gs->rcv(gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Let the UDP layer deal with the skb */
	return 1;
}
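
geneve_udp_encap_recv() above is a UDP encapsulation receive hook: returning 0 consumes the skb, while returning 1 hands the packet back to the UDP layer. A rough sketch of how such a hook and its sk_user_data are typically wired up through setup_udp_tunnel_sock() follows; the helper name and local variables are assumptions, not part of the example:

/* Rough sketch (assumed wiring): register the receive hook and publish
 * the geneve_sock as sk_user_data in one step.  Uses <net/udp_tunnel.h>.
 */
static void example_setup_geneve_sock(struct net *net, struct socket *sock,
				      struct geneve_sock *gs)
{
	struct udp_tunnel_sock_cfg cfg = {
		.sk_user_data	= gs,	/* read back via rcu_dereference_sk_user_data() */
		.encap_type	= 1,	/* generic UDP encapsulation */
		.encap_rcv	= geneve_udp_encap_recv,
	};

	setup_udp_tunnel_sock(net, sock, &cfg);
}
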
Example #5
static int
kclient_connect_complete(struct sock *sk)
{
	int descidx;
	kclient_desc_t *desc;
	SsProto *proto = (SsProto *)rcu_dereference_sk_user_data(sk);

	BUG_ON(proto == NULL);

	descidx = proto->type;
	desc = *(kclient_desc + descidx / KCLIENT_NCONNECTS)
			      + descidx % KCLIENT_NCONNECTS;
	BUG_ON(desc->proto.type != descidx);
	BUG_ON(desc->proto.listener != NULL);
	BUG_ON(desc->proto.hooks != &kclient_hooks);
	BUG_ON(desc->sk && (desc->sk != sk));

	desc->flags |= KCLIENT_CONNECT_ESTABLISHED;
	atomic_inc(&kclient_connect_ncomplete);
	wake_up(&kclient_finish_wq);
	return 0;
}
Example #6
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	/* Return packets with reserved bits set */
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		pr_warn("invalid vxlan flags=%#x vni=%#x\n",
			ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vs->rcv(vs, skb, vxh->vx_vni);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Return non vxlan pkt */
	return 1;
}
Example #7
/* tipc_send_msg - enqueue a send request */
static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dest)
{
	int ttl, err = 0;
	struct udp_bearer *ub;
	struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
	struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
	struct rtable *rt;

	if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
		err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
		if (err)
			goto tx_error;
	}

	skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
	ub = rcu_dereference_rtnl(b->media_ptr);
	if (!ub) {
		err = -ENODEV;
		goto tx_error;
	}
	if (dst->proto == htons(ETH_P_IP)) {
		struct flowi4 fl = {
			.daddr = dst->ipv4.s_addr,
			.saddr = src->ipv4.s_addr,
			.flowi4_mark = skb->mark,
			.flowi4_proto = IPPROTO_UDP
		};
		rt = ip_route_output_key(net, &fl);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto tx_error;
		}

		skb->dev = rt->dst.dev;
		ttl = ip4_dst_hoplimit(&rt->dst);
		udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
				    dst->ipv4.s_addr, 0, ttl, 0, src->port,
				    dst->port, false, true);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct dst_entry *ndst;
		struct flowi6 fl6 = {
			.flowi6_oif = ub->ifindex,
			.daddr = dst->ipv6,
			.saddr = src->ipv6,
			.flowi6_proto = IPPROTO_UDP
		};
		err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
						 &fl6);
		if (err)
			goto tx_error;
		ttl = ip6_dst_hoplimit(ndst);
		err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
					   ndst->dev, &src->ipv6,
					   &dst->ipv6, 0, ttl, 0, src->port,
					   dst->port, false);
#endif
	}
	return err;

tx_error:
	kfree_skb(skb);
	return err;
}

/* tipc_udp_recv - read data from bearer socket */
static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udp_bearer *ub;
	struct tipc_bearer *b;

	ub = rcu_dereference_sk_user_data(sk);
	if (!ub) {
		pr_err_ratelimited("Failed to get UDP bearer reference");
		kfree_skb(skb);
		return 0;
	}

	skb_pull(skb, sizeof(struct udphdr));
	rcu_read_lock();
	b = rcu_dereference_rtnl(ub->bearer);

	if (b) {
		tipc_rcv(sock_net(sk), skb, b);
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
{
	int err = 0;
	struct ip_mreqn mreqn;
	struct sock *sk = ub->ubsock->sk;

	if (ntohs(remote->proto) == ETH_P_IP) {
		if (!ipv4_is_multicast(remote->ipv4.s_addr))
			return 0;
		mreqn.imr_multiaddr = remote->ipv4;
		mreqn.imr_ifindex = ub->ifindex;
		err = ip_mc_join_group(sk, &mreqn);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (!ipv6_addr_is_multicast(&remote->ipv6))
			return 0;
		err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
						   &remote->ipv6);
#endif
	}
	return err;
}
Example #8
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;
	u32 flags, vni;
	struct vxlan_metadata md = {0};

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	flags = ntohl(vxh->vx_flags);
	vni = ntohl(vxh->vx_vni);

	if (flags & VXLAN_HF_VNI) {
		flags &= ~VXLAN_HF_VNI;
	} else {
		/* VNI flag always required to be set */
		goto bad_flags;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
		struct vxlanhdr_gbp *gbp;

		gbp = (struct vxlanhdr_gbp *)vxh;
		md.gbp = ntohs(gbp->policy_id);

		if (gbp->dont_learn)
			md.gbp |= VXLAN_GBP_DONT_LEARN;

		if (gbp->policy_applied)
			md.gbp |= VXLAN_GBP_POLICY_APPLIED;

		flags &= ~VXLAN_GBP_USED_BITS;
	}

	if (flags || (vni & 0xff)) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * the VXLAN RFC (RFC 7348), which stipulates that bits in
		 * reserved fields are to be ignored. The approach here
		 * maintains compatibility with previous stack code, and
		 * also is more robust and provides a little more security
		 * in adding extensions to VXLAN.
		 */

		goto bad_flags;
	}

	md.vni = vxh->vx_vni;
	vs->rcv(vs, skb, &md);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
bad_flags:
	pr_debug("invalid vxlan flags=%#x vni=%#x\n",
		 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

error:
	/* Return non vxlan pkt */
	return 1;
}