Example #1
void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
		 const struct in6_addr *gw, int oif)
{
	if (this_cpu_read(nf_skb_duplicated))
		return;
	/*
	 * Copy the skb, and route the copy. The original skb continues on
	 * its way as if nothing had happened; the copy is independently
	 * delivered to the gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	/* Avoid counting cloned packets towards the original connection. */
	nf_reset(skb);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
#endif
	/* Decrement the hop limit in PREROUTING/INPUT to mitigate
	 * duplication loops between two hosts. */
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		--iph->hop_limit;
	}
	if (nf_dup_ipv6_route(net, skb, gw, oif)) {
		__this_cpu_write(nf_skb_duplicated, true);
		ip6_local_out(net, skb->sk, skb);
		__this_cpu_write(nf_skb_duplicated, false);
	} else {
		kfree_skb(skb);
	}
}
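
The nf_skb_duplicated flag is a per-CPU recursion guard: ip6_local_out() re-enters the netfilter hooks, so without the guard the duplicated packet would itself be duplicated, ad infinitum. The TEE examples below use the older percpu_read()/percpu_write() spelling of the same pattern. A minimal userspace sketch of the idea, with a thread-local flag standing in for the per-CPU one (duplicate_hook and send_packet are hypothetical):

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool dup_active;	/* analogue of nf_skb_duplicated */

static void send_packet(const char *payload);

/* Hypothetical hook that duplicates every packet it sees. */
static void duplicate_hook(const char *payload)
{
	if (dup_active)		/* already inside a duplication: bail out */
		return;
	dup_active = true;
	send_packet(payload);	/* re-enters the stack and this hook */
	dup_active = false;
}

static void send_packet(const char *payload)
{
	duplicate_hook(payload);	/* the "local out" path runs hooks */
	printf("sent: %s\n", payload);
}

int main(void)
{
	send_packet("hello");	/* printed twice, not infinitely */
	return 0;
}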
Example #2
static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_tee_tginfo *info = par->targinfo;

	if (percpu_read(tee_active))
		return XT_CONTINUE;	/* already duplicating on this CPU */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

#ifdef WITH_CONNTRACK
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct     = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN) {
		struct ipv6hdr *iph = ipv6_hdr(skb);
		--iph->hop_limit;
	}
	if (tee_tg_route6(skb, info)) {
		percpu_write(tee_active, true);
		ip6_local_out(skb);
		percpu_write(tee_active, false);
	} else {
		kfree_skb(skb);
	}
	return XT_CONTINUE;
}
Example #3
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
    if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
        return -EHOSTUNREACH; /* Routing failure or similar. */

    return dccp_transmit_skb(sk, (skb_cloned(skb) ?
                      pskb_copy(skb, GFP_ATOMIC) :
                      skb_clone(skb, GFP_ATOMIC)));
}
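
The ternary above is the point of the example: if the skb on the retransmit queue still has clones sharing its data (skb_cloned()), a previous transmission may still be using that buffer, so the function pays for pskb_copy() to get a private copy of the header area; otherwise a cheap skb_clone() reference is enough. A hedged sketch of that decision as a standalone helper (the name skb_writable_copy is hypothetical, kernel context assumed):

/* Return a buffer whose header area the caller may write
 * without racing an in-flight clone. */
static inline struct sk_buff *skb_writable_copy(struct sk_buff *skb)
{
	return skb_cloned(skb) ? pskb_copy(skb, GFP_ATOMIC)
			       : skb_clone(skb, GFP_ATOMIC);
}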
Example #4
/* Clone a packet.
 * The pkttag contents are NOT cloned.
 */
void *
osl_pktdup(osl_t *osh, void *skb)
{
	void * p;

	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;

		/* if the buffer allocated from ctfpool is cloned then
		 * we can't be sure when it will be freed. since there
		 * is a chance that we will be losing a buffer
		 * from our pool, we increment the refill count for the
		 * object to be alloced later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif /* CTFPOOL */

	/* Clear PKTC  context */
	PKTSETCLINK(p, NULL);
	PKTCCLRFLAGS(p);
	PKTCSETCNT(p, 1);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone() copies skb->cb; we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->pktalloced);
	return (p);
}
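
The kernel-version gate in the middle is the portability point: from 2.6.36 on, the driver duplicates with pskb_copy(), so the duplicate owns a private copy of the header data, while older kernels fall back to skb_clone(), which shares all packet data with the original. Factored out, the same gate might read as follows (pkt_dup_compat is a hypothetical name, kernel context assumed):

#include <linux/version.h>
#include <linux/skbuff.h>

static inline struct sk_buff *pkt_dup_compat(struct sk_buff *skb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	return pskb_copy(skb, GFP_ATOMIC);	/* private header copy */
#else
	return skb_clone(skb, GFP_ATOMIC);	/* shared header and data */
#endif
}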
Example #5
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_tee_tginfo *info = par->targinfo;
	struct iphdr *iph;

	if (percpu_read(tee_active))
		return XT_CONTINUE;
	/*
	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
	 * the original skb, which should continue on its way as if nothing has
	 * happened. The copy should be independently delivered to the TEE
	 * --gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

#ifdef WITH_CONNTRACK
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct     = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	/*
	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
	 * since the length could have changed as a result of defragmentation.
	 *
	 * We also decrease the TTL to mitigate potential TEE loops
	 * between two hosts.
	 *
	 * Set %IP_DF so that the original source is notified of a potentially
	 * decreased MTU on the clone route. IPv6 does this too.
	 */
	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (tee_tg_route4(skb, info)) {
		percpu_write(tee_active, true);
		ip_local_out(skb);
		percpu_write(tee_active, false);
	} else {
		kfree_skb(skb);
	}
	return XT_CONTINUE;
}
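
Because the TTL byte was just modified, ip_send_check() has to run before the copy is routed. For reference, a minimal userspace sketch of the RFC 1071 computation it performs; the checksum field must be zeroed before summing, exactly as ip_send_check() does, and the function name is hypothetical:

#include <stdint.h>
#include <stddef.h>

/* Ones'-complement checksum over an IPv4 header. 'ihl' is the header
 * length in 32-bit words; iph->check must already be zero. */
static uint16_t ipv4_header_csum(const void *hdr, unsigned int ihl)
{
	const uint16_t *p = hdr;
	uint32_t sum = 0;

	for (unsigned int i = 0; i < ihl * 2; i++)	/* 16-bit words */
		sum += p[i];
	while (sum >> 16)				/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}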
Example #6
void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
		 const struct in_addr *gw, int oif)
{
	struct iphdr *iph;

	if (this_cpu_read(nf_skb_duplicated))
		return;
	/*
	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
	 * the original skb, which should continue on its way as if nothing has
	 * happened. The copy should be independently delivered to the gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct     = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	/*
	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
	 * since the length could have changed as a result of defragmentation.
	 *
	 * We also decrease the TTL to mitigate potential loops between two
	 * hosts.
	 *
	 * Set %IP_DF so that the original source is notified of a potentially
	 * decreased MTU on the clone route. IPv6 does this too.
	 */
	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (nf_dup_ipv4_route(net, skb, gw, oif)) {
		__this_cpu_write(nf_skb_duplicated, true);
		ip_local_out(net, skb->sk, skb);
		__this_cpu_write(nf_skb_duplicated, false);
	} else {
		kfree_skb(skb);
	}
}
Example #7
static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsr_priv *hsr_priv;
	struct hsr_ethhdr *hsr_ethhdr;
	struct sk_buff *skb2;
	int res1, res2;

	hsr_priv = netdev_priv(dev);
	hsr_ethhdr = (struct hsr_ethhdr *) skb->data;

	if ((skb->protocol != htons(ETH_P_PRP)) ||
	    (hsr_ethhdr->ethhdr.h_proto != htons(ETH_P_PRP))) {
		hsr_fill_tag(hsr_ethhdr, hsr_priv);
		skb->protocol = htons(ETH_P_PRP);
	}

	/* Private copy for slave B, so the two transmit paths
	 * do not share writable headers. */
	skb2 = pskb_copy(skb, GFP_ATOMIC);

	res1 = NET_XMIT_DROP;
	if (likely(hsr_priv->slave[HSR_DEV_SLAVE_A]))
		res1 = slave_xmit(skb, hsr_priv, HSR_DEV_SLAVE_A);

	res2 = NET_XMIT_DROP;
	if (likely(skb2 && hsr_priv->slave[HSR_DEV_SLAVE_B]))
		res2 = slave_xmit(skb2, hsr_priv, HSR_DEV_SLAVE_B);

	if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN ||
		   res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) {
		hsr_priv->dev->stats.tx_packets++;
		hsr_priv->dev->stats.tx_bytes += skb->len;
	} else {
		hsr_priv->dev->stats.tx_dropped++;
	}

	return NETDEV_TX_OK;
}
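
Note the acceptance test: NET_XMIT_CN reports congestion, but the frame was still accepted by the queue, so either slave returning it counts as a successful transmit. Factored into a hypothetical helper for clarity (hsr_xmit_ok is not part of the HSR code):

/* True if the transmit result means the frame was actually queued. */
static inline bool hsr_xmit_ok(int res)
{
	return res == NET_XMIT_SUCCESS || res == NET_XMIT_CN;
}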
Example #8
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *b = p;
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */

		if (tipc_bearer_blocked(p)) {
			if (!s || tipc_bearer_blocked(s))
				continue; /* Can't use either bearer */
			b = s;
		}

		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(b, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}

		/* Swap bearers for next packet */
		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
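
The copy discipline in the loop is worth spelling out: the first bearer may consume the caller's buffer directly, but every further bearer gets its own pskb_copy() so that two transmit paths never share one buffer, and since tipc_bearer_send() keeps its own clone, the local copy is freed immediately after the call. A hypothetical helper capturing that rule (bcbearer_send_one is not part of TIPC):

static int bcbearer_send_one(struct tipc_bearer *b, struct sk_buff *buf,
			     bool use_original)
{
	struct sk_buff *tbuf;

	if (use_original) {
		/* First bearer: send the caller's buffer as-is. */
		tipc_bearer_send(b, buf, &b->bcast_addr);
		return 0;
	}
	/* Further bearers: send a private copy to avoid concurrent
	 * access, then drop it; the bearer keeps its own clone. */
	tbuf = pskb_copy(buf, GFP_ATOMIC);
	if (!tbuf)
		return -ENOMEM;
	tipc_bearer_send(b, tbuf, &b->bcast_addr);
	kfree_skb(tbuf);
	return 0;
}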
Example #9
/* Stand-in for the kernel's skb_clone() (apparently a test or
 * userspace shim): every "clone" is made with pskb_copy(), so it gets
 * its own copy of the header data instead of sharing it. */
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
    return pskb_copy(skb, gfp_mask);
}