Example #1
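From the kernel's TLS transmit path (net/tls): the helper walks the pending record's plaintext scatterlist and fills each fragment straight from the user iterator with copy_from_iter(), reporting a short copy as -EFAULT.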
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
Example #2
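The Bluetooth virtual HCI driver reads one complete frame from the iterator into a freshly allocated sk_buff with copy_from_iter(), then dispatches it according to the leading packet-type byte.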
static inline ssize_t vhci_get_user(struct vhci_data *data,
				    struct iov_iter *from)
{
	size_t len = iov_iter_count(from);
	struct sk_buff *skb;
	__u8 pkt_type, opcode;
	int ret;

	if (len < 2 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (copy_from_iter(skb_put(skb, len), len, from) != len) {
		kfree_skb(skb);
		return -EFAULT;
	}

	pkt_type = *((__u8 *) skb->data);
	skb_pull(skb, 1);

	switch (pkt_type) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		if (!data->hdev) {
			kfree_skb(skb);
			return -ENODEV;
		}

		hci_skb_pkt_type(skb) = pkt_type;

		ret = hci_recv_frame(data->hdev, skb);
		break;

	case HCI_VENDOR_PKT:
		cancel_delayed_work_sync(&data->open_timeout);

		opcode = *((__u8 *) skb->data);
		skb_pull(skb, 1);

		if (skb->len > 0) {
			kfree_skb(skb);
			return -EINVAL;
		}

		kfree_skb(skb);

		ret = vhci_create_device(data, opcode);
		break;

	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	return (ret < 0) ? ret : len;
}
Example #3
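The generic iov_iter helper: for kernel-backed iterators (ITER_BVEC or ITER_KVEC) it maps the page with kmap_atomic() and reuses copy_from_iter(); plain user iovecs go through copy_page_from_iter_iovec().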
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
Example #4
File: file.c Project: 19Dan01/linux
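The ncpfs write path drains the iterator into a vmalloc'd bounce buffer in server-buffer-sized chunks and pushes each chunk to the server with ncp_write_kernel(); a short copy_from_iter() aborts the loop with -EFAULT.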
static ssize_t
ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	size_t already_written = 0;
	size_t bufsize;
	int errno;
	void *bouncebuffer;
	off_t pos;

	ncp_dbg(1, "enter %pD2\n", file);
	errno = generic_write_checks(iocb, from);
	if (errno <= 0)
		return errno;

	errno = ncp_make_open(inode, O_WRONLY);
	if (errno) {
		ncp_dbg(1, "open failed, error=%d\n", errno);
		return errno;
	}
	bufsize = NCP_SERVER(inode)->buffer_size;

	errno = file_update_time(file);
	if (errno)
		goto outrel;

	bouncebuffer = vmalloc(bufsize);
	if (!bouncebuffer) {
		errno = -EIO;	/* -ENOMEM */
		goto outrel;
	}
	pos = iocb->ki_pos;
	while (iov_iter_count(from)) {
		int written_this_time;
		size_t to_write = min_t(size_t,
				      bufsize - (pos % bufsize),
				      iov_iter_count(from));

		if (copy_from_iter(bouncebuffer, to_write, from) != to_write) {
			errno = -EFAULT;
			break;
		}
		if (ncp_write_kernel(NCP_SERVER(inode), 
		    NCP_FINFO(inode)->file_handle,
		    pos, to_write, bouncebuffer, &written_this_time) != 0) {
			errno = -EIO;
			break;
		}
		pos += written_this_time;
		already_written += written_this_time;

		if (written_this_time != to_write)
			break;
	}
	vfree(bouncebuffer);

	iocb->ki_pos = pos;

	if (pos > i_size_read(inode)) {
		mutex_lock(&inode->i_mutex);
		if (pos > i_size_read(inode))
			i_size_write(inode, pos);
		mutex_unlock(&inode->i_mutex);
	}
	ncp_dbg(1, "exit %pD2\n", file);
outrel:
	ncp_inode_close(inode);		
	return already_written ? already_written : errno;
}
Example #5
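A minimal DAX copy_from_iter callback: dcssblk memory is directly addressable, so the operation reduces to a single copy_from_iter() into the target address.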
static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}
Example #6
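DPDK's KNI vhost transmit helper: after dequeuing an mbuf from alloc_q, it copies the payload out of the message iterator (copy_from_iter() when the kernel exposes iov_iter in msghdr, memcpy_fromiovecend() otherwise), pads short frames to ETH_ZLEN, and enqueues the mbuf on tx_q.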
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	int ret;

	KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, (int)m->msg_iov->iov_len);
#endif

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
		           - kni->mbuf_va + kni->mbuf_kva;

#ifdef HAVE_IOV_ITER_MSGHDR
		copy_from_iter(data_kva, len, &m->msg_iter);
#else
		memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
#endif

		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}
Example #7
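A backport-friendly variant of the vhci read path from Example #2: LINUX_VERSION_CODE guards select copy_from_iter() on kernels 3.18 and newer and fall back to a per-iovec copy_from_user() loop on older ones.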
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)
static inline ssize_t vhci_get_user(struct vhci_data *data,
				    struct iov_iter *from)
{
	size_t len = iov_iter_count(from);
	struct sk_buff *skb;
	__u8 pkt_type, opcode;
	int ret;
#else
static inline ssize_t vhci_get_user(struct vhci_data *data,
				    const struct iovec *iov,
				    unsigned long count)
{
	size_t len = iov_length(iov, count);
	struct sk_buff *skb;
	__u8 pkt_type, opcode;
	unsigned long i;
	int ret;
#endif

	if (len < 2 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)
	if (copy_from_iter(skb_put(skb, len), len, from) != len) {
		kfree_skb(skb);
		return -EFAULT;
	}
#else
	for (i = 0; i < count; i++) {
		if (copy_from_user(skb_put(skb, iov[i].iov_len),
				   iov[i].iov_base, iov[i].iov_len)) {
			kfree_skb(skb);
			return -EFAULT;
		}
	}
#endif

	pkt_type = *((__u8 *) skb->data);
	skb_pull(skb, 1);

	switch (pkt_type) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		if (!data->hdev) {
			kfree_skb(skb);
			return -ENODEV;
		}

		bt_cb(skb)->pkt_type = pkt_type;

		ret = hci_recv_frame(data->hdev, skb);
		break;

	case HCI_VENDOR_PKT:
		if (data->hdev) {
			kfree_skb(skb);
			return -EBADFD;
		}

		cancel_delayed_work_sync(&data->open_timeout);

		opcode = *((__u8 *) skb->data);
		skb_pull(skb, 1);

		if (skb->len > 0) {
			kfree_skb(skb);
			return -EINVAL;
		}

		kfree_skb(skb);

		ret = vhci_create_device(data, opcode);
		break;

	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	return (ret < 0) ? ret : len;
}
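All of the examples share the same contract: copy_from_iter() advances the iterator and returns the number of bytes actually copied, and anything short of the requested length is treated as a fault. The sketch below distills that pattern; the helper name stage_from_iter and the kmalloc'd staging buffer are hypothetical and not taken from any of the drivers above.

#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical helper: stage everything the iterator holds into a
 * kernel buffer, treating a short copy as -EFAULT, as the examples
 * above do. */
static ssize_t stage_from_iter(struct iov_iter *from)
{
	size_t len = iov_iter_count(from);
	void *buf;
	ssize_t ret = len;

	if (!len)
		return 0;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* copy_from_iter() returns the number of bytes it copied;
	 * anything less than len means the source faulted. */
	if (copy_from_iter(buf, len, from) != len)
		ret = -EFAULT;

	/* ... hand buf off to the device or protocol here ... */

	kfree(buf);
	return ret;
}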