Example #1
0
/*
 * ncp_file_write - write(2) handler for ncpfs regular files.
 *
 * Copies the user's data through a vmalloc'ed bounce buffer and pushes it
 * to the NetWare server one server-buffer-sized chunk at a time.
 *
 * Returns the number of bytes actually written, or a negative errno when
 * nothing at all could be written.
 */
static ssize_t
ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	size_t already_written = 0;
	off_t pos;
	size_t bufsize;
	int errno;
	void* bouncebuffer;

	ncp_dbg(1, "enter %pD2\n", file);
	/* Reject counts so large they would look negative as an ssize_t. */
	if ((ssize_t) count < 0)
		return -EINVAL;
	pos = *ppos;
	/* O_APPEND: always start the write at the current end of file. */
	if (file->f_flags & O_APPEND) {
		pos = i_size_read(inode);
	}

	/* Without O_LARGEFILE, clamp the write to the MAX_NON_LFS limit. */
	if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
		if (pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (count > MAX_NON_LFS - (u32)pos) {
			count = MAX_NON_LFS - (u32)pos;
		}
	}
	/* Enforce the per-superblock maximum file size as well. */
	if (pos >= inode->i_sb->s_maxbytes) {
		if (count || pos > inode->i_sb->s_maxbytes) {
			return -EFBIG;
		}
	}
	if (pos + count > inode->i_sb->s_maxbytes) {
		count = inode->i_sb->s_maxbytes - pos;
	}
	
	if (!count)
		return 0;
	/* Make sure the server-side file handle is open for writing. */
	errno = ncp_make_open(inode, O_WRONLY);
	if (errno) {
		ncp_dbg(1, "open failed, error=%d\n", errno);
		return errno;
	}
	bufsize = NCP_SERVER(inode)->buffer_size;

	already_written = 0;

	errno = file_update_time(file);
	if (errno)
		goto outrel;

	/* One server request worth of data is staged here per iteration. */
	bouncebuffer = vmalloc(bufsize);
	if (!bouncebuffer) {
		errno = -EIO;	/* -ENOMEM */
		goto outrel;
	}
	while (already_written < count) {
		int written_this_time;
		/*
		 * Write up to the next bufsize-aligned boundary of the file
		 * position, but never past what the caller asked for.
		 */
		size_t to_write = min_t(unsigned int,
				      bufsize - (pos % bufsize),
				      count - already_written);

		if (copy_from_user(bouncebuffer, buf, to_write)) {
			errno = -EFAULT;
			break;
		}
		if (ncp_write_kernel(NCP_SERVER(inode), 
		    NCP_FINFO(inode)->file_handle,
		    pos, to_write, bouncebuffer, &written_this_time) != 0) {
			errno = -EIO;
			break;
		}
		pos += written_this_time;
		buf += written_this_time;
		already_written += written_this_time;

		/* A short write from the server: stop rather than retry. */
		if (written_this_time != to_write) {
			break;
		}
	}
	vfree(bouncebuffer);

	*ppos = pos;

	/*
	 * Grow i_size if we wrote past the old end of file; recheck under
	 * i_mutex to avoid racing with a concurrent extender.
	 */
	if (pos > i_size_read(inode)) {
		mutex_lock(&inode->i_mutex);
		if (pos > i_size_read(inode))
			i_size_write(inode, pos);
		mutex_unlock(&inode->i_mutex);
	}
	ncp_dbg(1, "exit %pD2\n", file);
outrel:
	ncp_inode_close(inode);		
	return already_written ? already_written : errno;
}
Example #2
0
/*
 * f2fs_sync_file - fsync/fdatasync handler for f2fs.
 *
 * Flushes dirty data pages in [start, end], then either triggers a full
 * checkpoint (when need_do_checkpoint() says recovery needs one) or writes
 * just the inode's node pages plus recovery info, finishing with a device
 * flush.  Returns 0 on success or a negative errno.
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
    struct inode *inode = file->f_mapping->host;
    struct f2fs_inode_info *fi = F2FS_I(inode);
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    nid_t ino = inode->i_ino;
    int ret = 0;
    bool need_cp = false;
    struct writeback_control wbc = {
        .sync_mode = WB_SYNC_ALL,
        .nr_to_write = LONG_MAX,
        .for_reclaim = 0,
    };

    /* Nothing can be dirty on a read-only filesystem. */
    if (unlikely(f2fs_readonly(inode->i_sb)))
        return 0;

    trace_f2fs_sync_file_enter(inode);

    /* if fdatasync is triggered, let's do in-place-update */
    if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
        set_inode_flag(fi, FI_NEED_IPU);
    ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
    clear_inode_flag(fi, FI_NEED_IPU);

    if (ret) {
        trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
        return ret;
    }

    /* if the inode is dirty, let's recover all the time */
    if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
        update_inode_page(inode);
        goto go_write;
    }

    /*
     * if there is no written data, don't waste time to write recovery info.
     */
    if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
            !exist_written_data(sbi, ino, APPEND_INO)) {

        /* it may call write_inode just prior to fsync */
        if (need_inode_page_update(sbi, ino))
            goto go_write;

        if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
                exist_written_data(sbi, ino, UPDATE_INO))
            goto flush_out;
        goto out;
    }
go_write:
    /* guarantee free sections for fsync */
    f2fs_balance_fs(sbi);

    /*
     * Both of fdatasync() and fsync() are able to be recovered from
     * sudden-power-off.
     */
    down_read(&fi->i_sem);
    need_cp = need_do_checkpoint(inode);
    up_read(&fi->i_sem);

    if (need_cp) {
        /* all the dirty node pages should be flushed for POR */
        ret = f2fs_sync_fs(inode->i_sb, 1);

        /*
         * We've secured consistency through sync_fs. Following pino
         * will be used only for fsynced inodes after checkpoint.
         */
        try_to_fix_pino(inode);
        clear_inode_flag(fi, FI_APPEND_WRITE);
        clear_inode_flag(fi, FI_UPDATE_WRITE);
        goto out;
    }
sync_nodes:
    sync_node_pages(sbi, ino, &wbc);

    /* if cp_error was enabled, we should avoid infinite loop */
    if (unlikely(f2fs_cp_error(sbi)))
        goto out;

    /* Re-sync until no further inode block update is pending. */
    if (need_inode_block_update(sbi, ino)) {
        mark_inode_dirty_sync(inode);
        f2fs_write_inode(inode, NULL);
        goto sync_nodes;
    }

    ret = wait_on_node_pages_writeback(sbi, ino);
    if (ret)
        goto out;

    /* once recovery info is written, don't need to tack this */
    remove_dirty_inode(sbi, ino, APPEND_INO);
    clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
    remove_dirty_inode(sbi, ino, UPDATE_INO);
    clear_inode_flag(fi, FI_UPDATE_WRITE);
    /* Push a cache flush to the device so the data is durable. */
    ret = f2fs_issue_flush(sbi);
out:
    trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
    f2fs_trace_ios(NULL, 1);
    return ret;
}

/*
 * Return the index of the first dirty page at or after @pgofs, or LONG_MAX
 * when no dirty page is tagged.  Only meaningful for SEEK_DATA; any other
 * whence yields 0 since dirty pages are irrelevant to hole lookup.
 */
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
                                       pgoff_t pgofs, int whence)
{
    struct pagevec pvec;
    int found;

    if (whence != SEEK_DATA)
        return 0;

    pagevec_init(&pvec, 0);
    found = pagevec_lookup_tag(&pvec, mapping, &pgofs,
                               PAGECACHE_TAG_DIRTY, 1);
    if (found)
        pgofs = pvec.pages[0]->index;
    else
        pgofs = LONG_MAX;
    pagevec_release(&pvec);
    return pgofs;
}

/*
 * Decide whether @blkaddr at page index @pgofs satisfies the seek request.
 * SEEK_DATA matches a mapped block, or a NEW_ADDR block whose page is the
 * first dirty page (data present only in the page cache).  SEEK_HOLE
 * matches an unallocated (NULL_ADDR) block.
 */
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
                           int whence)
{
    if (whence == SEEK_DATA) {
        if (blkaddr == NEW_ADDR)
            return dirty == pgofs;
        return blkaddr != NULL_ADDR;
    }
    if (whence == SEEK_HOLE)
        return blkaddr == NULL_ADDR;
    return false;
}

static inline int unsigned_offsets(struct file *file)
{
    return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

/*
 * Validate @offset against [0, maxsize] (negative allowed only for
 * FMODE_UNSIGNED_OFFSET files) and store it as the new file position,
 * resetting f_version on any actual move.  Returns the offset, or -EINVAL.
 */
static loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
    bool allow_negative = unsigned_offsets(file);

    if ((offset < 0 && !allow_negative) || offset > maxsize)
        return -EINVAL;

    if (offset == file->f_pos)
        return offset;

    file->f_pos = offset;
    file->f_version = 0;
    return offset;
}

/*
 * f2fs_seek_block - implement SEEK_DATA / SEEK_HOLE for f2fs.
 *
 * Walks the inode's dnode blocks starting from @offset looking for the
 * first position that satisfies @whence (see __found_offset()), honoring
 * dirty page-cache pages for SEEK_DATA.  Returns the new file position via
 * vfs_setpos(), or -ENXIO when no matching offset exists within i_size.
 *
 * Fix: the outer loop's update expression computed
 * `pgofs << PAGE_CACHE_SHIFT` without first widening pgofs to loff_t.
 * pgoff_t is unsigned long, so on 32-bit kernels the shift overflows for
 * page indices >= 1 << (32 - PAGE_CACHE_SHIFT), yielding a truncated
 * data_ofs.  The inner loop (and upstream f2fs) already uses the
 * (loff_t) cast; apply it here too.
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file->f_mapping->host;
    loff_t maxbytes = inode->i_sb->s_maxbytes;
    struct dnode_of_data dn;
    pgoff_t pgofs, end_offset, dirty;
    loff_t data_ofs = offset;
    loff_t isize;
    int err = 0;

    mutex_lock(&inode->i_mutex);

    isize = i_size_read(inode);
    if (offset >= isize)
        goto fail;

    /* handle inline data case */
    if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
        /* inline payload is all data; the only hole starts at isize */
        if (whence == SEEK_HOLE)
            data_ofs = isize;
        goto found;
    }

    pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

    dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

    /* widen pgofs before shifting so the byte offset can't overflow */
    for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
        if (err && err != -ENOENT) {
            goto fail;
        } else if (err == -ENOENT) {
            /* direct node does not exists */
            if (whence == SEEK_DATA) {
                pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
                                            F2FS_I(inode));
                continue;
            } else {
                /* a whole missing dnode is one big hole */
                goto found;
            }
        }

        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

        /* find data/hole in dnode block */
        for (; dn.ofs_in_node < end_offset;
                dn.ofs_in_node++, pgofs++,
                data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
            block_t blkaddr;
            blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

            if (__found_offset(blkaddr, dirty, pgofs, whence)) {
                f2fs_put_dnode(&dn);
                goto found;
            }
        }
        f2fs_put_dnode(&dn);
    }

    /* scanned to EOF without finding data */
    if (whence == SEEK_DATA)
        goto fail;
found:
    /* SEEK_HOLE may land past EOF; clamp the virtual hole to isize */
    if (whence == SEEK_HOLE && data_ofs > isize)
        data_ofs = isize;
    mutex_unlock(&inode->i_mutex);
    return vfs_setpos(file, data_ofs, maxbytes);
fail:
    mutex_unlock(&inode->i_mutex);
    return -ENXIO;
}

/*
 * llseek handler: classic whences go through the generic helper bounded by
 * s_maxbytes; SEEK_DATA/SEEK_HOLE are resolved by walking block mappings.
 */
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file->f_mapping->host;
    loff_t maxbytes = inode->i_sb->s_maxbytes;

    if (whence == SEEK_SET || whence == SEEK_CUR || whence == SEEK_END)
        return generic_file_llseek_size(file, offset, whence,
                                        maxbytes);

    if (whence == SEEK_DATA || whence == SEEK_HOLE) {
        if (offset < 0)
            return -ENXIO;
        return f2fs_seek_block(file, offset, whence);
    }

    return -EINVAL;
}

/*
 * mmap handler: inline data must be converted to a regular block mapping
 * before page faults can be served, then install the f2fs vm_ops.
 */
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct inode *inode = file_inode(file);
    int err;

    /* we don't need to use inline_data strictly */
    if (f2fs_has_inline_data(inode)) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }

    file_accessed(file);
    vma->vm_ops = &f2fs_file_vm_ops;
    return 0;
}

/*
 * Free up to @count data blocks starting at dn->ofs_in_node within the
 * current node page: clear each mapped block address, update the extent
 * cache, and invalidate the block.  dn->ofs_in_node is restored on return.
 * Returns the number of blocks actually freed.
 */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
    int saved_ofs = dn->ofs_in_node;
    int freed = 0;
    struct f2fs_node *node;
    __le32 *blk;

    node = F2FS_NODE(dn->node_page);
    blk = blkaddr_in_node(node) + saved_ofs;

    while (count-- > 0) {
        block_t blkaddr = le32_to_cpu(*blk);

        if (blkaddr != NULL_ADDR) {
            /* clear the on-disk mapping before invalidating the block */
            dn->data_blkaddr = NULL_ADDR;
            set_data_blkaddr(dn);
            f2fs_update_extent_cache(dn);
            invalidate_blocks(sbi, blkaddr);
            if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
                clear_inode_flag(F2FS_I(dn->inode),
                                 FI_FIRST_BLOCK_WRITTEN);
            freed++;
        }
        blk++;
        dn->ofs_in_node++;
    }

    if (freed) {
        dec_valid_block_count(sbi, dn->inode, freed);
        set_page_dirty(dn->node_page);
        sync_inode_page(dn);
    }
    dn->ofs_in_node = saved_ofs;

    trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                          dn->ofs_in_node, freed);
    return freed;
}
Example #3
0
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
    struct inode *inode = file_inode(filp);

    return put_user(inode->i_generation, (int __user *)arg);
}
Example #4
0
/* Look up the registered video_device for @file by its char-device minor. */
struct video_device *video_devdata(struct file *file)
{
	unsigned int minor = iminor(file_inode(file));

	return video_device[minor];
}