Example No. 1
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	struct sockaddr_in sin;
	const struct iphdr *iph = ip_hdr(skb);
	__be16 *ports = (__be16 *)skb_transport_header(skb);

	if (skb_transport_offset(skb) + 4 > skb->len)
		return;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = iph->daddr;
	sin.sin_port = ports[1];
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
Example No. 2
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	__be16 _ports[2], *ports;
	struct sockaddr_in sin;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */
	ports = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_ports), &_ports);
	if (!ports)
		return;

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
	sin.sin_port = ports[1];
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
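Examples 1 and 2 are two revisions of the same helper: the first dereferences skb_transport_header() directly and bounds-checks against skb->len by hand, while the second goes through skb_header_pointer(), which also copes with headers that sit in paged (non-linear) skb data. A minimal sketch of that general pattern follows; read_be16_pair_at_transport_offset() is an illustrative name, not a kernel function.

/* Illustrative sketch only: read the first four bytes at the transport
 * offset, copying them into the caller's buffer when they are not in the
 * linear part of the skb.  Returns NULL if the packet is too short.
 */
static const __be16 *read_be16_pair_at_transport_offset(const struct sk_buff *skb,
							 __be16 buf[2])
{
	return skb_header_pointer(skb, skb_transport_offset(skb),
				  2 * sizeof(__be16), buf);
}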
Example No. 3
static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
	struct mld_msg *mld;

	if (!ipv6_mc_may_pull(skb, len))
		return -EINVAL;

	mld = (struct mld_msg *)skb_transport_header(skb);

	switch (mld->mld_type) {
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MGM_REPORT:
		return 0;
	case ICMPV6_MLD2_REPORT:
		return ipv6_mc_check_mld_reportv2(skb);
	case ICMPV6_MGM_QUERY:
		return ipv6_mc_check_mld_query(skb);
	default:
		return -ENOMSG;
	}
}
Example No. 4
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
Example No. 5
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* queue the head of the skb with the L4 checksum offload offsets */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
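In this driver, hdr_len is simply skb_transport_offset(skb): for CHECKSUM_PARTIAL skbs the stack expects the checksum to be computed from the transport header onward and stored skb->csum_offset bytes past that point, which is what csum_offset above encodes. A small illustrative helper (the name is an assumption, not a driver API):

/* Illustrative sketch: where, relative to skb->data, a CHECKSUM_PARTIAL
 * device must store the computed L4 checksum.
 */
static unsigned int csum_store_offset(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + skb->csum_offset;
}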
Example No. 6
static void vxlan_gso(struct sk_buff *skb)
{
	int udp_offset = skb_transport_offset(skb);
	struct udphdr *uh;

	uh = udp_hdr(skb);
	uh->len = htons(skb->len - udp_offset);

	/* csum segment if tunnel sets skb with csum. */
	if (unlikely(uh->check)) {
		struct iphdr *iph = ip_hdr(skb);

		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					       skb->len - udp_offset,
					       IPPROTO_UDP, 0);
		uh->check = csum_fold(skb_checksum(skb, udp_offset,
				      skb->len - udp_offset, 0));

		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

	}
	skb->ip_summed = CHECKSUM_NONE;
}
Example No. 7
File: flow.c Project: IDM350/linux
void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
{
	struct flow_stats *stats;
	__be16 tcp_flags = 0;

	if (!flow->stats.is_percpu)
		stats = flow->stats.stat;
	else
		stats = this_cpu_ptr(flow->stats.cpu_stats);

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
	}

	spin_lock(&stats->lock);
	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += skb->len;
	stats->tcp_flags |= tcp_flags;
	spin_unlock(&stats->lock);
}
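TCP_FLAGS_BE16() pulls the flag bits out of the 16-bit word that follows the TCP sequence/acknowledgment fields; in the Open vSwitch tree it is defined roughly as below (the exact form shown here is a sketch based on that tree, not quoted from this file).

/* Rough sketch of the macro used above: mask the 12 flag/reserved bits out
 * of the data-offset/flags word of the TCP header.
 */
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))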
Example No. 8
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int extlen;
	struct ipv6hdr *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	u8 nexthdr;
	char tmp_base[8];
	struct {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		struct in6_addr saddr;
#endif
		struct in6_addr daddr;
		char hdrs[0];
	} *tmp_ext;

	skb_push(skb, -skb_network_offset(skb));
	top_iph = ipv6_hdr(skb);
	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));

	nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	/* When there are no extension headers, we only need to save the first
	 * 8 bytes of the base IP header.
	 */
	memcpy(tmp_base, top_iph, sizeof(tmp_base));

	tmp_ext = NULL;
	extlen = skb_transport_offset(skb) - sizeof(struct ipv6hdr);
	if (extlen) {
		extlen += sizeof(*tmp_ext);
		tmp_ext = kmalloc(extlen, GFP_ATOMIC);
		if (!tmp_ext) {
			err = -ENOMEM;
			goto error;
		}
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		memcpy(tmp_ext, &top_iph->saddr, extlen);
#else
		memcpy(tmp_ext, &top_iph->daddr, extlen);
#endif
		err = ipv6_clear_mutable_options(top_iph,
						 extlen - sizeof(*tmp_ext) +
						 sizeof(*top_iph),
						 XFRM_POLICY_OUT);
		if (err)
			goto error_free_iph;
	}

	ah = ip_auth_hdr(skb);
	ah->nexthdr = nexthdr;

	top_iph->priority    = 0;
	top_iph->flow_lbl[0] = 0;
	top_iph->flow_lbl[1] = 0;
	top_iph->flow_lbl[2] = 0;
	top_iph->hop_limit   = 0;

	ahp = x->data;
	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

	spin_lock_bh(&x->lock);
	err = ah_mac_digest(ahp, skb, ah->auth_data);
	memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
	spin_unlock_bh(&x->lock);

	if (err)
		goto error_free_iph;

	memcpy(top_iph, tmp_base, sizeof(tmp_base));
	if (tmp_ext) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		memcpy(&top_iph->saddr, tmp_ext, extlen);
#else
		memcpy(&top_iph->daddr, tmp_ext, extlen);
#endif
error_free_iph:
		kfree(tmp_ext);
	}

error:
	return err;
}
Example No. 9
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	int iif;

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		__IP6_INC_STATS(net,
				ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	iif = skb->dev ? skb->dev->ifindex : 0;
	fq = fq_find(net, fhdr->identification, hdr, iif);
	if (fq) {
		u32 prob_offset = 0;
		int ret;

		spin_lock(&fq->q.lock);

		fq->iif = iif;
		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
				     &prob_offset);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q);
		if (prob_offset) {
			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
					IPSTATS_MIB_INHDRERRORS);
			/* icmpv6_param_prob() calls kfree_skb(skb) */
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
		}
		return ret;
	}

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
			IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
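The htons(0xFFF9) test above combines the 13-bit fragment offset (IP6_OFFSET, 0xFFF8) with the more-fragments flag (IP6_MF, 0x0001); when neither is set the packet carries a fragment header but is not actually fragmented. A hedged restatement using the named constants, with an illustrative helper name:

/* Illustrative sketch: a fragment header with neither an offset nor the
 * more-fragments bit set describes an "atomic" (non-fragmented) packet.
 */
static bool ip6_frag_hdr_is_atomic(const struct frag_hdr *fhdr)
{
	return !(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF));
}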
Example No. 10
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		WARN(1, "Record not found for seq %u\n", tcp_seq);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retranmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), frag->page_offset);

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}
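tcp_payload_offset above is the standard way to locate TCP payload bytes: the network headers up to the transport header, plus the variable-length TCP header itself. As a stand-alone helper (illustrative name only) that is:

/* Illustrative sketch: number of TCP payload bytes carried by the skb. */
static int tcp_payload_length(const struct sk_buff *skb)
{
	return skb->len - skb_transport_offset(skb) - tcp_hdrlen(skb);
}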
Example No. 11
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
Example No. 12
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi   = &sq->wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	unsigned int num_bytes;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags	= MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats.csum_partial++;
		}
	} else
		sq->stats.csum_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}

	if (skb_is_gso(skb)) {
		eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode    = MLX5_OPCODE_LSO;

		if (skb->encapsulation) {
			ihs = skb_inner_transport_header(skb) - skb->data +
			      inner_tcp_hdrlen(skb);
			sq->stats.tso_inner_packets++;
			sq->stats.tso_inner_bytes += skb->len - ihs;
		} else {
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
			sq->stats.tso_packets++;
			sq->stats.tso_bytes += skb->len - ihs;
		}

		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	} else {
		bf = sq->bf_budget   &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	wi->num_bytes = num_bytes;

	if (skb_vlan_tag_present(skb)) {
		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
				  &skb_len);
		ihs += VLAN_HLEN;
	} else {
		memcpy(eseg->inline_hdr_start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
	}

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);

	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	wi->num_dma = 0;

	headlen = skb_len - skb->data_len;
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		wi->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		wi->num_dma++;

		dseg++;
	}

	ds_cnt += wi->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += wi->num_wqebbs;

	if (unlikely(MLX5E_TX_HW_STAMP(sq->channel->priv, skb)))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	netdev_tx_sent_queue(sq->txq, wi->num_bytes);

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.queue_stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && sq->uar_bf_map)
			bf_sz = wi->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
	}

	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	sq->stats.packets++;
	sq->stats.bytes += num_bytes;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.queue_dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
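The num_bytes computation in the GSO branch accounts for the headers that will be replicated in every additional segment: each of the gso_segs - 1 extra segments repeats ihs header bytes on the wire. As a stand-alone sketch (helper name is illustrative):

/* Illustrative sketch: bytes that will actually hit the wire after TSO,
 * counting the header bytes replicated in each additional segment.
 */
static unsigned int tso_wire_bytes(const struct sk_buff *skb,
				   unsigned int hdr_len)
{
	return skb->len + (skb_shinfo(skb)->gso_segs - 1) * hdr_len;
}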
Example No. 13
static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
	return sctp_compute_cksum(skb, skb_transport_offset(skb));
}
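The CRC32c returned here is what a caller stores back into the SCTP common header; a hedged sketch of such a caller (the helper name is illustrative, not the actual offload code):

/* Illustrative sketch: write the CRC32c computed from the transport offset
 * back into the SCTP common header of the segment.
 */
static void sctp_store_gso_csum(struct sk_buff *skb)
{
	sctp_hdr(skb)->checksum = sctp_gso_make_checksum(skb);
}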
Example No. 14
File: gso.c Project: ALutzG/ovs
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   bool tx_path,
					   sa_family_t sa_family)
{
	void *iph = skb_network_header(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	int outer_l3_offset = skb_network_offset(skb);
	int outer_l4_offset = skb_transport_offset(skb);
	struct sk_buff *skb1 = skb;
	struct dst_entry *dst = skb_dst(skb);
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	BUILD_BUG_ON(sizeof(struct ovs_gso_cb) > FIELD_SIZEOF(struct sk_buff, cb));
	OVS_GSO_CB(skb)->ipv6 = (sa_family == AF_INET6);
	/* setup whole inner packet to get protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* setup l3 packet to gso, to get around segmentation bug on older kernel.*/
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From 3.9 kernel skb->cb is used by skb gso. Therefore
	 * make copy of it to restore it back. */
	memcpy(cb, skb->cb, sizeof(cb));

	skb->encapsulation = 0;

	/* We are handling offloads by segmenting l3 packet, so
	 * no need to call OVS compat segmentation function. */

#ifdef HAVE___SKB_GSO_SEGMENT
#undef __skb_gso_segment
	segs = __skb_gso_segment(skb, 0, tx_path);
#else
#undef skb_gso_segment
	segs = skb_gso_segment(skb, 0);
#endif

	if (!segs || IS_ERR(segs))
		goto free;

	skb = segs;
	while (skb) {
		__skb_push(skb, pkt_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, outer_l3_offset);
		skb_set_transport_header(skb, outer_l4_offset);
		skb->mac_len = 0;

		memcpy(skb_network_header(skb), iph, pkt_hlen);
		memcpy(skb->cb, cb, sizeof(cb));

		skb->protocol = proto;
		if (skb->next)
			dst = dst_clone(dst);

		skb_dst_set(skb, dst);
		OVS_GSO_CB(skb)->fix_segment(skb);

		skb = skb->next;
	}
free:
	consume_skb(skb1);
	return segs;
}
Example No. 15
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
					IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}
Example No. 16
inline void *chcr_crypto_wreq(struct sk_buff *skb,
			       struct net_device *dev,
			       void *pos,
			       int credits,
			       struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int immdatalen = 0;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	unsigned int flits;
	u32 wr_mid;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->chcr_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, kctx_len);

	if (is_eth_imm(skb, kctx_len))
		immdatalen = skb->len;

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2)  - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 immdatalen);

	/* CPL_SEC_PDU */
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(
				(skb_transport_offset(skb) +
				sizeof(struct ip_esp_hdr) + 1)));

	wr->req.sec_cpl.pldlen = htonl(skb->len);

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
				(skb_transport_offset(skb) + 1),
				(skb_transport_offset(skb) +
				 sizeof(struct ip_esp_hdr)),
				(skb_transport_offset(skb) +
				 sizeof(struct ip_esp_hdr) +
				 GCM_ESP_IV_SIZE + 1), 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
					   sizeof(struct ip_esp_hdr) +
					   GCM_ESP_IV_SIZE + 1,
					   sa_entry->authsize,
					   sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								  0, 0, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
Example No. 17
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct iphdr *top_iph;
	struct ip_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	/* Strip IP+ESP header. */
	__skb_pull(skb, skb_transport_offset(skb));
	/* Now skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
		goto error;

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	__skb_push(skb, skb->data - skb_network_header(skb));
	top_iph = skb->nh.iph;
	esph = (struct ip_esp_hdr *)(skb_network_header(skb) +
				     top_iph->ihl * 4);
	top_iph->tot_len = htons(skb->len + alen);
	*(u8*)(trailer->tail - 1) = top_iph->protocol;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;

		uh = (struct udphdr *)esph;
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - top_iph->ihl*4);
		uh->check = 0;

		switch (encap->encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		top_iph->protocol = IPPROTO_UDP;
	} else
		top_iph->protocol = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);
	xfrm_aevent_doreplay(x);

	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (unlikely(err))
		goto error;

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

	ip_send_check(top_iph);

error:
	return err;
}
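The padding math above (the same computation appears again in Example 30) rounds the payload plus the two ESP trailer bytes, pad length and next header, up to the cipher block size, and optionally up to a configured pad length. Restated as a small helper with an illustrative name:

/* Illustrative sketch: length of the ESP-encrypted region after padding.
 * The +2 covers the pad-length and next-header trailer bytes.
 */
static int esp_padded_len(int payload_len, int blksize, int padlen)
{
	int clen = ALIGN(payload_len + 2, blksize);

	if (padlen)
		clen = ALIGN(clen, padlen);
	return clen;
}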
Example No. 18
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* preload the TCP checksum with the pseudo-header sum (length 0);
	 * the hardware adds in each segment's length during TSO
	 */
	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* queue the skb head in chunks of at most WQ_ENET_MAX_DESC_LEN bytes */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left));
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* queue the page fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left));	
			frag_len_left -= len;
			offset += len;
		}
	}
}
Example No. 19
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	int evicted;

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len==0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
	if (evicted)
		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_REASMFAILS, evicted);

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
		     ip6_frag_ecn(hdr));
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q, &ip6_frags);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
Example No. 20
static inline int
dma_xmit(struct sk_buff* skb, struct net_device *dev, END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 next_idx, desc_odd = 0;
	u32 txd_info2 = 0, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

#if !defined (RAETH_HW_PADPKT)
	if (skb->len < ei_local->min_pkt_len) {
		if (skb_padto(skb, ei_local->min_pkt_len)) {
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: skb_padto failed\n", RAETH_DEV_NAME);
#endif
			return NETDEV_TX_OK;
		}
		skb_put(skb, ei_local->min_pkt_len - skb->len);
	}
#endif

#if defined (CONFIG_RALINK_MT7620)
	if (gmac_no == PSE_PORT_PPE)
		txd_info4 = TX4_DMA_FP_BMAP(0x80); /* P7 */
	else
#if defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x20); /* P5 */
#elif defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x10); /* P4 */
#else
		txd_info4 = 0; /* routing by DA */
#endif
#elif defined (CONFIG_RALINK_MT7621)
	txd_info4 = TX4_DMA_FPORT(gmac_no);
#else
	txd_info4 = (TX4_DMA_QN(3) | TX4_DMA_PN(gmac_no));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && !defined (RAETH_SDMA)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb)) {
#if defined (RAETH_HW_VLAN4K)
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#else
		u32 vlan_tci = skb_vlan_tag_get(skb);
		txd_info4 |= (TX4_DMA_INSV | TX4_DMA_VPRI(vlan_tci));
		txd_info4 |= (u32)ei_local->vlan_4k_map[(vlan_tci & VLAN_VID_MASK)];
#endif
	}
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;
		
		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}
		
		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif
	nr_desc = DIV_ROUND_UP(nr_desc, 2);

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+1 free descriptors */
	next_idx = (ei_local->txd_last_idx + nr_desc) % NUM_TX_DESC;
	if (ei_local->txd_buff[ei_local->txd_last_idx] || ei_local->txd_buff[next_idx]) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: PDMA TX ring is full! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
				&txd_info2, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
					&txd_info2, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* smp_mb() already inlined in netdev_tx_sent_queue */
	wmb();
#endif

	/* kick the DMA TX */
	sysRegWrite(TX_CTX_IDX0, cpu_to_le32(ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
Example No. 21
static inline int
dma_xmit(struct sk_buff *skb, struct net_device *dev, END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 txd_info3, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	txd_info3 = TX3_QDMA_SWC;
	if (gmac_no != PSE_PORT_PPE) {
		u32 QID = M2Q_table[(skb->mark & 0x3f)];
		if (QID < 8 && M2Q_wan_lan) {
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (gmac_no == PSE_PORT_GMAC2)
				QID += 8;
#elif defined (CONFIG_RAETH_HW_VLAN_TX)
			if ((skb_vlan_tag_get(skb) & VLAN_VID_MASK) > 1)
				QID += 8;
#endif
		}
		txd_info3 |= TX3_QDMA_QID(QID);
	}

	txd_info4 = TX4_DMA_FPORT(gmac_no);

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb))
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;
		
		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}
		
		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+2 free descriptors (2 need to prevent head/tail overlap) */
	if (ei_local->txd_pool_free_num < (nr_desc+2)) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: QDMA TX pool is run out! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
				txd_info3, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
					txd_info3, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* smp_mb() already inlined in netdev_tx_sent_queue */
	wmb();
#endif

	/* kick the QDMA TX */
	sysRegWrite(QTX_CTX_PTR, (u32)get_txd_ptr_phy(ei_local, ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
Example No. 22
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
    const struct inet6_protocol *ipprot;
    struct inet6_dev *idev;
    unsigned int nhoff;
    int nexthdr;
    bool raw;
    bool have_final = false;

    /*
     *	Parse extension headers
     */

    rcu_read_lock();
resubmit:
    idev = ip6_dst_idev(skb_dst(skb));
    if (!pskb_pull(skb, skb_transport_offset(skb)))
        goto discard;
    nhoff = IP6CB(skb)->nhoff;
    nexthdr = skb_network_header(skb)[nhoff];

resubmit_final:
    raw = raw6_local_deliver(skb, nexthdr);
    ipprot = rcu_dereference(inet6_protos[nexthdr]);
    if (ipprot) {
        int ret;

        if (have_final) {
            if (!(ipprot->flags & INET6_PROTO_FINAL)) {
                /* Once we've seen a final protocol don't
                 * allow encapsulation on any non-final
                 * ones. This allows foo in UDP encapsulation
                 * to work.
                 */
                goto discard;
            }
        } else if (ipprot->flags & INET6_PROTO_FINAL) {
            const struct ipv6hdr *hdr;

            /* Only do this once for first final protocol */
            have_final = true;

            /* Free reference early: we don't need it any more,
               and it may hold ip_conntrack module loaded
               indefinitely. */
            nf_reset(skb);

            skb_postpull_rcsum(skb, skb_network_header(skb),
                               skb_network_header_len(skb));
            hdr = ipv6_hdr(skb);
            if (ipv6_addr_is_multicast(&hdr->daddr) &&
                    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
                                         &hdr->saddr) &&
                    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
                goto discard;
        }
        if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
                !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
            goto discard;

        ret = ipprot->handler(skb);
        if (ret > 0) {
            if (ipprot->flags & INET6_PROTO_FINAL) {
                /* Not an extension header, most likely UDP
                 * encapsulation. Use return value as nexthdr
                 * protocol not nhoff (which presumably is
                 * not set by handler).
                 */
                nexthdr = ret;
                goto resubmit_final;
            } else {
                goto resubmit;
            }
        } else if (ret == 0) {
            __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
        }
    } else {
        if (!raw) {
            if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                __IP6_INC_STATS(net, idev,
                                IPSTATS_MIB_INUNKNOWNPROTOS);
                icmpv6_send(skb, ICMPV6_PARAMPROB,
                            ICMPV6_UNK_NEXTHDR, nhoff);
            }
            kfree_skb(skb);
        } else {
            __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
            consume_skb(skb);
        }
    }
    rcu_read_unlock();
    return 0;

discard:
    __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
    rcu_read_unlock();
    kfree_skb(skb);
    return 0;
}
Example No. 23
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		eseg->cs_flags	= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
	else
		sq->stats.csum_offload_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}

	if (skb_is_gso(skb)) {
		u32 payload_len;

		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;
		ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload_len  = skb->len - ihs;
		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
					(skb_shinfo(skb)->gso_segs - 1) * ihs;
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
							ETH_ZLEN);
	}

	skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
	skb_pull_inline(skb, ihs);

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	MLX5E_TX_SKB_CB(skb)->num_dma = 0;

	headlen = skb_headlen(skb);
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
							MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && sq->uar_bf_map)
			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
	}

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;

	sq->stats.packets++;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, skb);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
Example No. 24
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}
Example No. 25
	case IP6_MH_TYPE_HOT:
	case IP6_MH_TYPE_COT:
	case IP6_MH_TYPE_BERROR:
		len = 2;
		break;
	}
	return len;
}

static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
{
	struct ip6_mh _hdr;
	const struct ip6_mh *mh;

	mh = skb_header_pointer(skb, skb_transport_offset(skb),
				sizeof(_hdr), &_hdr);
	if (!mh)
		return -1;

	if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
		return -1;
Example No. 26
/*
 * Modifies the header and adds sekret packet information
 */
int __create_sek_packet(
		struct sk_buff* sock_buff_in,
	 	struct sk_buff** sock_buff_out){
	
	/* http://stackoverflow.com/questions/13071054/how-to-echo-a-packet-in-kernel-space-using-netfilter-hooks*/
	static struct iphdr*   ip_header;
	static struct udphdr*  udp_header;

	unsigned char* 	payload;   /* packet payload */
	unsigned char*	payload_copy; /* temporary copy of the payload */
	unsigned int 	payload_len_bytes; /* packet payload length in bytes */
   
    struct sekret_header sek_header;
    struct sekret_header* p_sek_header;

    int total_header_len = sizeof(struct iphdr) + sizeof(struct udphdr);
    int sekret_header_len = sizeof(struct sekret_header);
    int header_delta = 0;
    int udp_len = 0;

    /* pointers to existing data */
    ip_header  = (struct iphdr*) skb_network_header(sock_buff_in);
    udp_header = (struct udphdr *) ((unsigned char *) ip_header + ip_hdrlen(sock_buff_in));
    payload    = (unsigned char *) (skb_header_pointer(sock_buff_in,
    	sizeof(struct iphdr) + sizeof(struct udphdr),0, NULL));
    payload_len_bytes = sock_buff_in->len - total_header_len;

    /* sekret header */
    sek_header.signature 	= SEKRET_SIGNATURE;
    sek_header.dport   		= udp_header->dest;
    sek_header.packet_num	= 1; /* for now */
    sek_header.packet_total = 1; /* for now */
    sek_header.size_bytes	= payload_len_bytes;


    udp_len = sekret_header_len + payload_len_bytes + sizeof(struct udphdr);
	/* forge the source and target address and port */ 
    ip_header->saddr 	= sekret_current_machine_ip[1]; /* local machine ip */
	ip_header->daddr 	= target_ip;  /* target machine ip */
	ip_header->tot_len  = htons(udp_len + sizeof(struct iphdr));

	udp_header->dest    = htons(SEKRET_OPEN_PORT);  /* port 5555 */
	udp_header->len     = htons(udp_len); 

	
	/* NOTE: make payload copy - move to static buffer*/
	payload_copy = kmalloc(payload_len_bytes, GFP_ATOMIC);
	memcpy(payload_copy, payload, payload_len_bytes);

	/* allocate extra space if needed */
	header_delta = sekret_header_len - skb_headroom(sock_buff_in);
	if (header_delta > 0){
		/* allocate a new skb */
		if (0 != pskb_expand_head(sock_buff_in, header_delta, 0,GFP_ATOMIC)) {
			printk(KERN_ALERT 
			"[SEK] __create_sek_packet:: failed to allocate memory.\n");
			kfree(payload_copy);
			return -1;
		}
	}
	p_sek_header = (struct sekret_header*)skb_push(
		sock_buff_in, sekret_header_len);

	/* add sekret header */
	memcpy(p_sek_header, &sek_header, sekret_header_len);
	
	/* add the payload */
	memcpy((unsigned char *) p_sek_header + sekret_header_len, payload_copy, payload_len_bytes);


	/* free allocated memory*/
	kfree(payload_copy);

	/* calculate ip checksum */
	ip_header->check = 0;
	ip_send_check(ip_header);

	/* seed the udp checksum with the complemented pseudo-header sum
	 * (the payload part of the checksum is not filled in here)
	 */
	udp_header->check = 0;
	udp_header->check = ~csum_tcpudp_magic(ip_header->saddr, ip_header->daddr,
		sock_buff_in->len - skb_transport_offset(sock_buff_in),
		IPPROTO_UDP, 0);

	*sock_buff_out = sock_buff_in;

	return 0;

}
Example No. 27
/* <DTS2012022805249  g00206962 2012/2/27 add begin */
static inline int ip6_input_icmp_cheat(struct sk_buff *skb)
{
	struct inet6_protocol *ipprot;
	unsigned int nhoff;
	int nexthdr, raw;
	u8 hash;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dst->dev);

	/*
	 *	Parse extension headers
	 */

	rcu_read_lock();
resubmit:
	idev = ip6_dst_idev(skb->dst);
	if (!pskb_pull(skb, skb_transport_offset(skb)))
		goto discard;
	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

	raw = raw6_local_deliver(skb, nexthdr);

	hash = nexthdr & (MAX_INET_PROTOS - 1);
	if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
		int ret;

		if (ipprot->flags & INET6_PROTO_FINAL) {
			struct ipv6hdr *hdr;

			/* Free reference early: we don't need it any more,
			   and it may hold ip_conntrack module loaded
			   indefinitely. */
			nf_reset(skb);

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);
            /*
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
			    &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr))
				goto discard;
			*/
			if (ipv6_addr_is_multicast(&hdr->daddr) && IPPROTO_ICMPV6 != nexthdr)
				goto discard;
			else if (!ipv6_addr_is_multicast(&hdr->daddr))
				goto discard;
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;

		ret = ipprot->handler(skb);
		if (ret > 0)
			goto resubmit;
		else if (ret == 0)
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				IP6_INC_STATS_BH(net, idev,
						 IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff,
					    skb->dev);
			}
		} else
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
		kfree_skb(skb);
	}
	rcu_read_unlock();
	return 0;

discard:
	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}
Example No. 28
void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
{
	struct flow_stats *stats;
	__be16 tcp_flags = 0;
	int node = numa_node_id();

	stats = rcu_dereference(flow->stats[node]);

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
	}

	/* Check if already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA-node is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_dereference(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_THISNODE |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = skb->len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += skb->len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}
Example No. 29
static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
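The same pskb_may_pull() bounds check applies to any fixed-size transport header; the TCP counterpart would look like the sketch below (assumed to live in the same flow-extraction context).

/* Sketch of the TCP counterpart: make sure the fixed part of the TCP header
 * is in the linear data before it is dereferenced.
 */
static bool tcphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
}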
Example No. 30
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
    int err;
    struct ipv6hdr *top_iph;
    struct ipv6_esp_hdr *esph;
    struct crypto_blkcipher *tfm;
    struct blkcipher_desc desc;
    struct sk_buff *trailer;
    int blksize;
    int clen;
    int alen;
    int nfrags;
    u8 *tail;
    struct esp_data *esp = x->data;
    int hdr_len = (skb_transport_offset(skb) +
                   sizeof(*esph) + esp->conf.ivlen);

    /* Strip IP+ESP header. */
    __skb_pull(skb, hdr_len);

    /* Now skb is pure payload to encrypt */
    err = -ENOMEM;

    /* Round to block size */
    clen = skb->len;

    alen = esp->auth.icv_trunc_len;
    tfm = esp->conf.tfm;
    desc.tfm = tfm;
    desc.flags = 0;
    blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
    clen = ALIGN(clen + 2, blksize);
    if (esp->conf.padlen)
        clen = ALIGN(clen, esp->conf.padlen);

    if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
        goto error;
    }

    /* Fill padding... */
    tail = skb_tail_pointer(trailer);
    do {
        int i;
        for (i=0; i<clen-skb->len - 2; i++)
            tail[i] = i + 1;
    } while (0);
    tail[clen-skb->len - 2] = (clen - skb->len) - 2;
    pskb_put(skb, trailer, clen - skb->len);

    top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
    esph = (struct ipv6_esp_hdr *)skb_transport_header(skb);
    top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
    *(skb_tail_pointer(trailer) - 1) = *skb_network_header(skb);
    *skb_network_header(skb) = IPPROTO_ESP;

    esph->spi = x->id.spi;
    esph->seq_no = htonl(++x->replay.oseq);
    xfrm_aevent_doreplay(x);

    if (esp->conf.ivlen) {
        if (unlikely(!esp->conf.ivinitted)) {
            get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
            esp->conf.ivinitted = 1;
        }
        crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
    }

    do {
        struct scatterlist *sg = &esp->sgbuf[0];

        if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
            sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
            if (!sg)
                goto error;
        }
        skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
        err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
        if (unlikely(sg != &esp->sgbuf[0]))
            kfree(sg);
    } while (0);

    if (unlikely(err))
        goto error;

    if (esp->conf.ivlen) {
        memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
        crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
    }

    if (esp->auth.icv_full_len) {
        err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
                             sizeof(*esph) + esp->conf.ivlen + clen);
        memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
    }

error:
    return err;
}