Example no. 1
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if (pos > sbi->s_bitmap_maxbytes)
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
static int ii_iovec_shorten(struct iov_iter *i, size_t count)
{
	struct iovec *iov = (struct iovec *)i->data;
	i->nr_segs = iov_shorten(iov, i->nr_segs, count);
	i->count = min(i->count, count);
	return 0;
}
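All of these call sites lean on the kernel's iov_shorten() helper. As a reference point, here is a minimal sketch of the behavior they assume (clamp an iovec array so it covers at most `to` bytes of data and return the new segment count); it is consistent with the historical fs/read_write.c helper, but treat it as a sketch rather than a copy of any particular tree:

unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
	unsigned long seg = 0;
	size_t len = 0;

	while (seg < nr_segs) {
		seg++;
		if (len + iov->iov_len >= to) {
			/* This segment crosses the limit: clamp it and stop. */
			iov->iov_len = to - len;
			break;
		}
		len += iov->iov_len;
		iov++;
	}
	return seg;
}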
Example no. 3
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	int unaligned_aio = 0;
	int ret;

	trace_ext4_file_write(iocb->ki_filp->f_path.dentry, iocb->ki_left);
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if ((pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	} else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
		   !is_sync_kiocb(iocb))) {
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
	}

	/* Unaligned direct AIO must be serialized; see comment above */
	if (unaligned_aio) {
		static unsigned long unaligned_warn_time;

		/* Warn about this once per day */
		if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
			ext4_msg(inode->i_sb, KERN_WARNING,
				 "Unaligned AIO/DIO on inode %ld by %s; "
				 "performance will be poor.",
				 inode->i_ino, current->comm);
		mutex_lock(ext4_aio_mutex(inode));
		ext4_aiodio_wait(inode);
	}

	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	trace_file_write_done(iocb->ki_filp);
	return ret;
}
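The serialization above hinges on ext4_unaligned_aio(), which is not shown. A sketch of the test as these call sites assume it (an O_DIRECT AIO write counts as unaligned when it lands inside i_size and either end of the range does not fall on a filesystem block boundary); the helper in a real ext4 tree of this era may differ in detail:

static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = pos + count;

	/* Writes at or past i_size extend the file and are handled elsewhere. */
	if (pos >= inode->i_size)
		return 0;

	/* Unaligned if either end of the range cuts into a block. */
	if ((pos & blockmask) || (final_size & blockmask))
		return 1;

	return 0;
}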
Example no. 4
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
    struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
    int unaligned_aio = 0;
    int ret;

    trace_ext4_file_write(iocb->ki_filp->f_path.dentry, iocb->ki_left);

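    /*
     * If we have encountered a bitmap-format file, the size limit
     * is smaller than s_maxbytes, which is for extent-mapped files.
     */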
    if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        size_t length = iov_length(iov, nr_segs);

        if ((pos > sbi->s_bitmap_maxbytes ||
                (pos == sbi->s_bitmap_maxbytes && length > 0)))
            return -EFBIG;

        if (pos + length > sbi->s_bitmap_maxbytes) {
            nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
                                  sbi->s_bitmap_maxbytes - pos);
        }
    } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
                        !is_sync_kiocb(iocb))) {
        unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
    }

    /* Unaligned direct AIO must be serialized. */
    if (unaligned_aio) {
        static unsigned long unaligned_warn_time;

        /* Warn about this once per day. */
        if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
            ext4_msg(inode->i_sb, KERN_WARNING,
                     "Unaligned AIO/DIO on inode %ld by %s; "
                     "performance will be poor.",
                     inode->i_ino, current->comm);
        mutex_lock(ext4_aio_mutex(inode));
        ext4_aiodio_wait(inode);
    }

    ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

    if (unaligned_aio)
        mutex_unlock(ext4_aio_mutex(inode));

    return ret;
}
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
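The summation loop in sg_io() guards against the per-segment lengths wrapping the unsigned running total. Here is the same idiom pulled out as a standalone helper for clarity (a sketch, not part of the block layer API):

#include <stdbool.h>
#include <stddef.h>

/* Add 'add' to '*total', failing instead of wrapping around. This mirrors
 * the "iov_data_len + iov[i].iov_len < iov_data_len" test in sg_io() above. */
static bool add_len_checked(size_t *total, size_t add)
{
	if (*total + add < *total)	/* unsigned overflow would wrap */
		return false;
	*total += add;
	return true;
}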
Example no. 6
int main(int argc, char **argv) {
  struct ieee154_frame_addr frame_address;
  struct lowpan_reconstruct recon;
  struct lowpan_ctx ctx;
  struct ip6_packet pkt;
  char print_buf[256];
  uint8_t fragment[128];        /* one 802.15.4 frame plus length byte; size is an assumption */
  char *cur;
  int idx = 0, rv;
  int c;                        /* int, so EOF from getc() is detected reliably */
  struct ip_iovec *v, *tail;
  //memset(frame, 0, sizeof(frame));

  /* read source */
  cur = print_buf;
  while ((c = getc(stdin)) != '\n')
    *cur++ = c;
  *cur++ = '\0';
  ieee154_parse(print_buf, &frame_address.ieee_src);

  /* read destination */
  cur = print_buf;
  while ((c = getc(stdin)) != '\n')
    *cur++ = c;
  *cur++ = '\0';
  ieee154_parse(print_buf, &frame_address.ieee_dst);
  frame_address.ieee_dstpan = 0x22;

  v = malloc(sizeof(struct ip_iovec));
  v->iov_base = malloc(1500);
  memset(v->iov_base, 0, 1500);   /* bytes are assembled with |=, so start zeroed */
  v->iov_len = 0;
  v->iov_next = NULL;
  tail = v;
  cur = v->iov_base;
  while ((c = getc(stdin)) != EOF) {
    c = tolower(c);
    if (c >= 'a' && c <= 'f'){
      c = c - 'a' + 10;
    } else if (c >= '0' && c <= '9') {
      c = c - '0';
    } else if (c == '\n' || c == '\r') {
      if (v->iov_len > 0) {
        fprintf(stderr, "Making new (%i)\n", (int)v->iov_len);
        struct ip_iovec *v2 = malloc(sizeof(struct ip_iovec));
        v2->iov_next = NULL;
        v2->iov_len = 0;
        v2->iov_base = malloc(1500);
        memset(v2->iov_base, 0, 1500);
        tail->iov_next = v2;
        tail = tail->iov_next;
        cur = tail->iov_base;
      }
      continue;
    } else
      continue;

    if (idx++ % 2 == 0) {
      *cur |= c << 4;
    } else {
      *cur++ |= c;
      tail->iov_len ++;
    }
  }

  ieee154_print(&frame_address.ieee_src, print_buf, sizeof(print_buf));
  fprintf(stderr, "src: %s", print_buf);
  ieee154_print(&frame_address.ieee_dst, print_buf, sizeof(print_buf));
  fprintf(stderr, " dest: %s\n", print_buf);
  //fprintf(stderr, "packet [%li]\n", cur - frame);
  //fprint_buffer(stderr, frame, cur - frame);
  fprintf(stderr, "\n");

  if (iov_len(v) < sizeof(struct ip6_hdr))
    return 1;

  memset(&ctx, 0, sizeof(ctx));
  iov_read(v, 0, sizeof(struct ip6_hdr), &pkt.ip6_hdr);
  pkt.ip6_data = iov_shorten(v, sizeof(struct ip6_hdr));
  pkt.ip6_hdr.ip6_plen = htons(iov_len(pkt.ip6_data));
  // iov_print(v);

  while ((rv = lowpan_frag_get(fragment, sizeof(fragment),
                               &pkt, &frame_address, &ctx)) > 0) {
    fragment[0] = rv - 1;       /* set the 802.15.4 length */
    print_buffer_bare(fragment, rv);
    fprintf(stderr, "fragment [%i]\n", rv);
    fprint_buffer(stderr, fragment, rv);
    printf("\n");
  }

  return 0;
}
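Example no. 6 works with blip-style chained iovecs rather than the kernel's flat array, and its iov_shorten(v, offset) appears to return the chain past an offset instead of truncating a segment count, so the name clash with the kernel helper above is only skin deep. A hypothetical sketch of the iov_len() helper it calls, inferred purely from the call sites (the real lib6lowpan implementation may differ):

#include <stddef.h>

/* Field layout taken from the code above. */
struct ip_iovec {
	void *iov_base;
	size_t iov_len;
	struct ip_iovec *iov_next;
};

/* Total number of payload bytes held by the whole chain. */
size_t iov_len(struct ip_iovec *v)
{
	size_t n = 0;
	for (; v != NULL; v = v->iov_next)
		n += v->iov_len;
	return n;
}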
Example no. 7
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t ret;
	int err;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if (pos > sbi->s_bitmap_maxbytes)
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	}

	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
	/*
	 * Skip flushing if there was an error, or if nothing was written.
	 */
	if (ret <= 0)
		return ret;

	/*
	 * If the inode is IS_SYNC, or is O_SYNC and we are doing data
	 * journalling then we need to make sure that we force the transaction
	 * to disk to keep all metadata uptodate synchronously.
	 */
	if (file->f_flags & O_SYNC) {
		/*
		 * If we are non-data-journaled, then the dirty data has
		 * already been flushed to backing store by generic_osync_inode,
		 * and the inode has been flushed too if there have been any
		 * modifications other than mere timestamp updates.
		 *
		 * Open question --- do we care about flushing timestamps too
		 * if the inode is IS_SYNC?
		 */
		if (!ext4_should_journal_data(inode))
			return ret;

		goto force_commit;
	}

	/*
	 * So we know that there has been no forced data flush.  If the inode
	 * is marked IS_SYNC, we need to force one ourselves.
	 */
	if (!IS_SYNC(inode))
		return ret;

	/*
	 * Open question #2 --- should we force data to disk here too?  If we
	 * don't, the only impact is that data=writeback filesystems won't
	 * flush data to disk automatically on IS_SYNC, only metadata (but
	 * historically, that is what ext2 has done.)
	 */

force_commit:
	err = ext4_force_commit(inode->i_sb);
	if (err)
		return err;
	return ret;
}
Example no. 8
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio)
{
	struct sg_io_hdr *sgp = &sksgio->sg;
	int i, acc;

	if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
		pr_debug("%s:%s:%d access sg failed %p\n",
			 skdev->name, __func__, __LINE__, sksgio->argp);
		return -EFAULT;
	}

	if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
		pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
			 skdev->name, __func__, __LINE__, sksgio->argp);
		return -EFAULT;
	}

	if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
		pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
			 skdev->name, __func__, __LINE__, sgp->interface_id);
		return -EINVAL;
	}

	if (sgp->cmd_len > sizeof(sksgio->cdb)) {
		pr_debug("%s:%s:%d cmd_len invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->cmd_len);
		return -EINVAL;
	}

	if (sgp->iovec_count > 256) {
		pr_debug("%s:%s:%d iovec_count invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->iovec_count);
		return -EINVAL;
	}

	if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
		pr_debug("%s:%s:%d dxfer_len invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->dxfer_len);
		return -EINVAL;
	}

	switch (sgp->dxfer_direction) {
	case SG_DXFER_NONE:
		acc = -1;
		break;

	case SG_DXFER_TO_DEV:
		acc = VERIFY_READ;
		break;

	case SG_DXFER_FROM_DEV:
	case SG_DXFER_TO_FROM_DEV:
		acc = VERIFY_WRITE;
		break;

	default:
		pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
			 skdev->name, __func__, __LINE__, sgp->dxfer_direction);
		return -EINVAL;
	}

	if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
		pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
			 skdev->name, __func__, __LINE__, sgp->cmdp);
		return -EFAULT;
	}

	if (sgp->mx_sb_len != 0) {
		if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
			pr_debug("%s:%s:%d access sbp failed %p\n",
				 skdev->name, __func__, __LINE__, sgp->sbp);
			return -EFAULT;
		}
	}

	if (sgp->iovec_count == 0) {
		sksgio->iov[0].iov_base = sgp->dxferp;
		sksgio->iov[0].iov_len = sgp->dxfer_len;
		sksgio->iovcnt = 1;
		sksgio->dxfer_len = sgp->dxfer_len;
	} else {
		struct sg_iovec *iov;
		uint nbytes = sizeof(*iov) * sgp->iovec_count;
		size_t iov_data_len;

		iov = kmalloc(nbytes, GFP_KERNEL);
		if (iov == NULL) {
			pr_debug("%s:%s:%d alloc iovec failed %d\n",
				 skdev->name, __func__, __LINE__,
				 sgp->iovec_count);
			return -ENOMEM;
		}
		sksgio->iov = iov;
		sksgio->iovcnt = sgp->iovec_count;

		if (copy_from_user(iov, sgp->dxferp, nbytes)) {
			pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
				 skdev->name, __func__, __LINE__, sgp->dxferp);
			return -EFAULT;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov_data_len = 0;
		for (i = 0; i < sgp->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len)
				return -EINVAL;
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (sgp->dxfer_len < iov_data_len) {
			sksgio->iovcnt = iov_shorten((struct iovec *)iov,
						     sgp->iovec_count,
						     sgp->dxfer_len);
			sksgio->dxfer_len = sgp->dxfer_len;
		} else
			sksgio->dxfer_len = iov_data_len;
	}

	if (sgp->dxfer_direction != SG_DXFER_NONE) {
		struct sg_iovec *iov = sksgio->iov;
		for (i = 0; i < sksgio->iovcnt; i++, iov++) {
			if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
				pr_debug("%s:%s:%d access data failed %p/%d\n",
					 skdev->name, __func__, __LINE__,
					 iov->iov_base, (int)iov->iov_len);
				return -EFAULT;
			}
		}
	}

	return 0;
}
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

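		/*
		 * Sum up the vecs, making sure they don't overflow
		 */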
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

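	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */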
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}