Example #1
0
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
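	/* remember the path MTU before any per-fragment updates */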
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
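	/* fragments carry a multiple of 8 octets of payload; leave room
	 * for the fragment header
	 */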
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM) {
		err = sock_tx_timestamp(sk, &tx_flags);
		if (err)
			goto error;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
					   sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
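	/* let UFO handle large UDP datagrams when the device supports it */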
	if (((length > mtu) ||
	     (skb && skb_has_frags(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
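				/* allow queued data up to twice sk_sndbuf */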
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != skb_frag_page(frag)) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					skb_frag_ref(skb, i);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from,
				    skb_frag_address(frag) + skb_frag_size(frag),
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			skb_frag_size_add(frag, copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #2
0
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    int hlimit, struct ipv6_txoptions *opt, struct flowi *fl, struct rt6_info *rt,
		    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks 
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji 
	 */

	inet->cork.length += length;

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message 
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #3
0
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has the Large Send Offload (LSO)
			 * capability, allow 'frame_len' to be as big as
			 * 'lso_max' when the indicated transfer length
			 * is >= lport->lso_max.
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							  cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Set the frame's max payload, which the base
			 * driver uses to tell the HW the max frame size,
			 * so the HW can fragment appropriately based on
			 * the "gso_max_size" of the underlying netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
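			/* charge the full (possibly compound) page to truesize */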
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
					   KM_SOFTIRQ0);
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, cmd->seq, fp);
		if (error) {
			/* XXX For now, initiator will retry */
			pr_err_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
		}
	}
	return ft_queue_status(se_cmd);
}
Example #4
0
/*
 * Send read data back to initiator.
 */
int ft_send_read_data(struct scst_cmd *cmd)
{
	struct ft_cmd *fcmd;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	size_t remaining;
	u32 fh_off = 0;
	u32 frame_off;
	size_t frame_len = 0;
	size_t mem_len;
	u32 mem_off;
	size_t tlen;
	struct page *page;
	int use_sg;
	int error;
	void *to = NULL;
	u8 *from = NULL;
	int loop_limit = 10000;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	ep = fc_seq_exch(fcmd->seq);
	lport = ep->lp;

	frame_off = fcmd->read_data_len;
	tlen = scst_cmd_get_resp_data_len(cmd);
	FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
		  ep->oid, ep->oxid, tlen, frame_off);
	if (tlen <= frame_off)
		return SCST_TGT_RES_SUCCESS;
	remaining = tlen - frame_off;
	if (remaining > UINT_MAX)
		FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
		       ep->oid, ep->oxid, tlen, frame_off);

	mem_len = scst_get_buf_first(cmd, &from);
	mem_off = 0;
	if (!mem_len) {
		FT_IO_DBG("mem_len 0\n");
		return SCST_TGT_RES_SUCCESS;
	}
	FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
		 ep->sid, ep->oxid, mem_len, frame_off, remaining);

	/*
	 * If we've already transferred some of the data, skip through
	 * the buffer over the data already sent and continue with the
	 * same sequence.  Otherwise, get a new sequence for the data.
	 */
	if (frame_off) {
		tlen = frame_off;
		while (mem_len <= tlen) {
			tlen -= mem_len;
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			if (!mem_len)
				return SCST_TGT_RES_SUCCESS;
		}
		mem_len -= tlen;
		mem_off = tlen;
	} else
		fcmd->seq = lport->tt.seq_start_next(fcmd->seq);

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4) && lport->sg_supp;

	while (remaining) {
		if (!loop_limit) {
			FT_ERR("hit loop limit.  remaining %zx mem_len %zx "
			       "frame_len %zx tlen %zx\n",
			       remaining, mem_len, frame_len, tlen);
			break;
		}
		loop_limit--;
		if (!mem_len) {
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			mem_off = 0;
			if (!mem_len) {
				FT_ERR("mem_len 0 from get_buf_next\n");
				break;
			}
		}
		if (!frame_len) {
			frame_len = fcmd->max_lso_payload;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp) {
				FT_IO_DBG("frame_alloc failed. "
					  "use_sg %d frame_len %zd\n",
					  use_sg, frame_len);
				break;
			}
			fr_max_payload(fp) = fcmd->max_payload;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
		}
		tlen = min(mem_len, frame_len);
		BUG_ON(!tlen);
		BUG_ON(tlen > remaining);
		BUG_ON(tlen > mem_len);
		BUG_ON(tlen > frame_len);

		if (use_sg) {
			page = virt_to_page(from + mem_off);
			get_page(page);
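			/* keep each fragment within a single page */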
			tlen = min_t(size_t, tlen,
				     PAGE_SIZE - (mem_off & ~PAGE_MASK));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, offset_in_page(from + mem_off),
					   tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
			frame_len -= tlen;
			if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
				frame_len = 0;
		} else {
			memcpy(to, from + mem_off, tlen);
			to += tlen;
			frame_len -= tlen;
		}

		mem_len -= tlen;
		mem_off += tlen;
		remaining -= tlen;
		frame_off += tlen;

		if (frame_len)
			continue;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP,
			       remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
			       (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
			       fh_off);
		error = lport->tt.seq_send(lport, fcmd->seq, fp);
		if (error) {
			WARN_ON(1);
			/* XXX For now, initiator will retry */
		} else
			fcmd->read_data_len = frame_off;
	}
	if (mem_len)
		scst_put_buf(cmd, from);
	if (remaining) {
		FT_IO_DBG("remaining read data %zd\n", remaining);
		return SCST_TGT_RES_QUEUE_FULL;
	}
	return SCST_TGT_RES_SUCCESS;
}
Example #5
0
static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
{
	struct net_device *netdev = priv->netdev;
	struct ftmac100_rxdes *rxdes;
	struct sk_buff *skb;
	struct page *page;
	dma_addr_t map;
	int length;

	rxdes = ftmac100_rx_locate_first_segment(priv);
	if (!rxdes)
		return false;

	if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
		ftmac100_rx_drop_packet(priv);
		return true;
	}

	/*
	 * It is impossible to get multi-segment packets
	 * because we always provide big enough receive buffers.
	 */
	if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
		BUG();

	/* start processing */
	skb = netdev_alloc_skb_ip_align(netdev, 128);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_err(netdev, "rx skb alloc failed\n");

		ftmac100_rx_drop_packet(priv);
		return true;
	}

	if (unlikely(ftmac100_rxdes_multicast(rxdes)))
		netdev->stats.multicast++;

	map = ftmac100_rxdes_get_dma_addr(rxdes);
	dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);

	length = ftmac100_rxdes_frame_length(rxdes);
	page = ftmac100_rxdes_get_page(rxdes);
	skb_fill_page_desc(skb, 0, page, 0, length);
	skb->len += length;
	skb->data_len += length;

	/* We pull the minimum amount into linear part */
	if (length > 64)
		skb->truesize += PAGE_SIZE;
	__pskb_pull_tail(skb, min(length, 64));

	ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);

	ftmac100_rx_pointer_advance(priv);

	skb->protocol = eth_type_trans(skb, netdev);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += skb->len;

	/* push packet to protocol stack */
	netif_receive_skb(skb);

	(*processed)++;
	return true;
}
Example #6
0
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    int hlimit, struct ipv6_txoptions *opt, struct flowi *fl, struct rt6_info *rt,
		    unsigned int flags)
{
	struct inet_opt *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy = 0;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL)
				np->cork.opt = kmalloc(opt->tot_len, sk->allocation);
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		np->cork.fl = fl;
		np->cork.hop_limit = hlimit;
		inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst);
		inet->cork.length = 0;
		inet->sndmsg_page = NULL;
		inet->sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

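	/* round the hardware header length up to a 16-byte boundary */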
	hh_len = (rt->u.dst.dev->hard_header_len&~15) + 16;

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	inet->cork.length += length;

	if ((skb = skb_peek_tail(&sk->write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		if ((copy = maxfraglen - skb->len) <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int alloclen;
			BUG_TRAP(copy == 0);
alloc_new_skb:
			datalen = maxfraglen - fragheaderlen;
			if (datalen > length)
				datalen = length;
			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = maxfraglen;
			else
				alloclen = fraglen;
			alloclen += sizeof(struct frag_hdr);
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->wmem_alloc) <= 2*sk->sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve 8 bytes for the fragment header */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;
			copy = datalen - transhdrlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, 0, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = inet->sndmsg_page;
			int off = inet->sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, inet->sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				inet->sndmsg_page = page;
				inet->sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			inet->sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(Ip6OutDiscards);
	return err;
}
Example #7
0
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

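	/* for writes, attach the buffer page to the frame instead of copying */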
	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
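	/* send a clone; the original skb is kept for retransmit */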
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
Example #8
0
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			      unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;
	char *dbuf = sc ? sc->request_buffer : task->data;

	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
			task, task->sc, offset, count, skb);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	tdata->offset = offset;
	tdata->count = count;
	if (sc && sc->use_sg) {
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(sc->request_buffer, sc->use_sg,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
					sc->use_sg, tdata->offset,
					sc->request_bufflen);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
					sc->use_sg, tdata->offset,
					sc->request_bufflen);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fits into the skb's page frags */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(dbuf);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(dbuf) + offset,
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

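	/* pad the PDU data to a 4-byte boundary with the shared pad page */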
	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		get_page(pad_page);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
				 padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
Example #9
0
static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
{
	struct net_device *netdev = priv->netdev;
	struct ftmac100_rxdes *rxdes;
	struct sk_buff *skb;
	struct page *page;
	dma_addr_t map;
	int length;

	rxdes = ftmac100_rx_locate_first_segment(priv);
	if (!rxdes)
		return false;

	if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
		ftmac100_rx_drop_packet(priv);
		return true;
	}

	/*
	 * It is impossible to get multi-segment packets
	 * because we always provide big enough receive buffers.
	 */
	if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
		BUG();

	/* start processing */
	skb = netdev_alloc_skb_ip_align(netdev, 128);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_err(netdev, "rx skb alloc failed\n");

		ftmac100_rx_drop_packet(priv);
		return true;
	}

	if (unlikely(ftmac100_rxdes_multicast(rxdes)))
		netdev->stats.multicast++;

	map = ftmac100_rxdes_get_dma_addr(rxdes);
	dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);

	length = ftmac100_rxdes_frame_length(rxdes);
	page = ftmac100_rxdes_get_page(rxdes);
	skb_fill_page_desc(skb, 0, page, 0, length);
	skb->len += length;
	skb->data_len += length;

	if (length > 128) {
		skb->truesize += PAGE_SIZE;
		/* We pull the minimum amount into linear part */
		__pskb_pull_tail(skb, ETH_HLEN);
	} else {
		/* Small frames are copied into linear part to free one page */
		__pskb_pull_tail(skb, length);
	}
	ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);

	ftmac100_rx_pointer_advance(priv);

	skb->protocol = eth_type_trans(skb, netdev);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += skb->len;

	/* push packet to protocol stack */
	netif_receive_skb(skb);

	(*processed)++;
	return true;
}