Example #1
File: aops.c Project: nemumu/linux
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
    struct gfs2_inode *ip = GFS2_I(mapping->host);
    struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
    unsigned requested = 0;
    int alloc_required;
    int error = 0;
    pgoff_t index = pos >> PAGE_CACHE_SHIFT;
    unsigned from = pos & (PAGE_CACHE_SIZE - 1);
    struct page *page;

    gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
    error = gfs2_glock_nq(&ip->i_gh);
    if (unlikely(error))
        goto out_uninit;
    if (&ip->i_inode == sdp->sd_rindex) {
        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                   GL_NOCACHE, &m_ip->i_gh);
        if (unlikely(error)) {
            gfs2_glock_dq(&ip->i_gh);
            goto out_uninit;
        }
    }

    alloc_required = gfs2_write_alloc_required(ip, pos, len);

    if (alloc_required || gfs2_is_jdata(ip))
        gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

    if (alloc_required) {
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        error = gfs2_quota_lock_check(ip);
        if (error)
            goto out_unlock;

        requested = data_blocks + ind_blocks;
        ap.target = requested;
        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
            goto out_qunlock;
    }

    rblocks = RES_DINODE + ind_blocks;
    if (gfs2_is_jdata(ip))
        rblocks += data_blocks ? data_blocks : 1;
    if (ind_blocks || data_blocks)
        rblocks += RES_STATFS + RES_QUOTA;
    if (&ip->i_inode == sdp->sd_rindex)
        rblocks += 2 * RES_STATFS;
    if (alloc_required)
        rblocks += gfs2_rg_blocks(ip, requested);

    error = gfs2_trans_begin(sdp, rblocks,
                             PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
    if (error)
        goto out_trans_fail;

    error = -ENOMEM;
    flags |= AOP_FLAG_NOFS;
    page = grab_cache_page_write_begin(mapping, index, flags);
    *pagep = page;
    if (unlikely(!page))
        goto out_endtrans;

    if (gfs2_is_stuffed(ip)) {
        error = 0;
        if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
            error = gfs2_unstuff_dinode(ip, page);
            if (error == 0)
                goto prepare_write;
        } else if (!PageUptodate(page)) {
            error = stuffed_readpage(ip, page);
        }
        goto out;
    }

prepare_write:
    error = __block_write_begin(page, from, len, gfs2_block_map);
out:
    if (error == 0)
        return 0;

    unlock_page(page);
    page_cache_release(page);

    gfs2_trans_end(sdp);
    if (pos + len > ip->i_inode.i_size)
        gfs2_trim_blocks(&ip->i_inode);
    goto out_trans_fail;

out_endtrans:
    gfs2_trans_end(sdp);
out_trans_fail:
    if (alloc_required) {
        gfs2_inplace_release(ip);
out_qunlock:
        gfs2_quota_unlock(ip);
    }
out_unlock:
    if (&ip->i_inode == sdp->sd_rindex) {
        gfs2_glock_dq(&m_ip->i_gh);
        gfs2_holder_uninit(&m_ip->i_gh);
    }
    gfs2_glock_dq(&ip->i_gh);
out_uninit:
    gfs2_holder_uninit(&ip->i_gh);
    return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
    struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
    struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
    struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
    struct buffer_head *m_bh, *l_bh;
    u64 fs_total, new_free;

    /* Total up the file system space, according to the latest rindex. */
    fs_total = gfs2_ri_total(sdp);
    if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
        return;

    spin_lock(&sdp->sd_statfs_spin);
    gfs2_statfs_change_in(m_sc, m_bh->b_data +
                          sizeof(struct gfs2_dinode));
    if (fs_total > (m_sc->sc_total + l_sc->sc_total))
        new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
    else
        new_free = 0;
    spin_unlock(&sdp->sd_statfs_spin);
    fs_warn(sdp, "File system extended by %llu blocks.\n",
            (unsigned long long)new_free);
    gfs2_statfs_change(sdp, new_free, new_free, 0);

    if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
        goto out;
    update_statfs(sdp, m_bh, l_bh);
    brelse(l_bh);
out:
    brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
    struct gfs2_inode *ip = GFS2_I(inode);
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    u64 to = pos + copied;
    void *kaddr;
    unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

    BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
    kaddr = kmap_atomic(page);
    memcpy(buf + pos, kaddr + pos, copied);
    memset(kaddr + pos + copied, 0, len - copied);
    flush_dcache_page(page);
    kunmap_atomic(kaddr);

    if (!PageUptodate(page))
        SetPageUptodate(page);
    unlock_page(page);
    page_cache_release(page);

    if (copied) {
        if (inode->i_size < to)
            i_size_write(inode, to);
        mark_inode_dirty(inode);
    }

    if (inode == sdp->sd_rindex) {
        adjust_fs_space(inode);
        sdp->sd_rindex_uptodate = 0;
    }

    brelse(dibh);
    gfs2_trans_end(sdp);
    if (inode == sdp->sd_rindex) {
        gfs2_glock_dq(&m_ip->i_gh);
        gfs2_holder_uninit(&m_ip->i_gh);
    }
    gfs2_glock_dq(&ip->i_gh);
    gfs2_holder_uninit(&ip->i_gh);
    return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
    struct inode *inode = page->mapping->host;
    struct gfs2_inode *ip = GFS2_I(inode);
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    struct buffer_head *dibh;
    unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
    unsigned int to = from + len;
    int ret;
    struct gfs2_trans *tr = current->journal_info;
    BUG_ON(!tr);

    BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

    ret = gfs2_meta_inode_buffer(ip, &dibh);
    if (unlikely(ret)) {
        unlock_page(page);
        page_cache_release(page);
        goto failed;
    }

    if (gfs2_is_stuffed(ip))
        return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

    if (!gfs2_is_writeback(ip))
        gfs2_page_add_databufs(ip, page, from, to);

    ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
    if (tr->tr_num_buf_new)
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
    else
        gfs2_trans_add_meta(ip->i_gl, dibh);


    if (inode == sdp->sd_rindex) {
        adjust_fs_space(inode);
        sdp->sd_rindex_uptodate = 0;
    }

    brelse(dibh);
failed:
    gfs2_trans_end(sdp);
    gfs2_inplace_release(ip);
    if (ip->i_res->rs_qa_qd_num)
        gfs2_quota_unlock(ip);
    if (inode == sdp->sd_rindex) {
        gfs2_glock_dq(&m_ip->i_gh);
        gfs2_holder_uninit(&m_ip->i_gh);
    }
    gfs2_glock_dq(&ip->i_gh);
    gfs2_holder_uninit(&ip->i_gh);
    return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
    SetPageChecked(page);
    return __set_page_dirty_buffers(page);
}
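
The write_begin path above turns the byte position into a page index and an in-page offset with shift-and-mask arithmetic (pos >> PAGE_CACHE_SHIFT and pos & (PAGE_CACHE_SIZE - 1)). A minimal standalone sketch of that mapping, assuming 4 KiB pages and hypothetical pos/len values rather than the kernel's own macros:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                     /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t pos = 5000;                       /* hypothetical file position */
	unsigned len = 300;                        /* hypothetical write length  */
	uint64_t index = pos >> PAGE_SHIFT;        /* which page-cache page      */
	unsigned from  = pos & (PAGE_SIZE - 1);    /* offset inside that page    */

	/* 5000 = 4096 + 904, so the write starts at offset 904 of page 1 */
	printf("index=%llu from=%u to=%u\n",
	       (unsigned long long)index, from, from + len);
	return 0;
}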
Example #2
static int jffs2_commit_write (struct file *filp, struct page *pg,
			       unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned aligned_start = start & ~3;
	int ret = 0;
	uint32_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	if (!start && end == PAGE_CACHE_SIZE) {
		/* We need to avoid deadlock with page_cache_read() in
		   jffs2_garbage_collect_pass(). So we have to mark the
		   page up to date, to prevent page_cache_read() from 
		   trying to re-lock it. */
		SetPageUptodate(pg);
	}

	ri = jffs2_alloc_raw_inode();

	if (!ri) {
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());

	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
	   hurt to do it again. The alternative is ifdefs, which are ugly. */
	kmap(pg);

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
				      end - aligned_start, &writtenlen);

	kunmap(pg);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}
	
	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
	if (writtenlen < (start&3))
		writtenlen = 0;
	else
		writtenlen -= (start&3);

	if (writtenlen) {
		if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
			inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
			inode->i_blocks = (inode->i_size + 511) >> 9;
			
			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
		}
	}
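
jffs2_commit_write rounds the start of the dirty range down to a 4-byte boundary before writing, then subtracts that padding from writtenlen so the caller only sees its own bytes. A small standalone sketch of the arithmetic, with hypothetical start/end values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned start = 10, end = 200;            /* hypothetical byte range in the page */
	unsigned aligned_start = start & ~3u;      /* round down to a 4-byte boundary     */
	uint32_t writtenlen = end - aligned_start; /* pretend that whole range was written */

	/* drop the padding bytes again so the caller only counts its own data */
	if (writtenlen < (start & 3))
		writtenlen = 0;
	else
		writtenlen -= (start & 3);

	printf("aligned_start=%u writtenlen=%u\n", aligned_start, writtenlen);
	return 0;
}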
Example #3
/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct key *key = data;
	size_t len;
	off_t offset;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
	default:
	go_on:
		offset = page->index << PAGE_CACHE_SHIFT;
		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));
			goto error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

error:
	SetPageError(page);
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}
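
The fetch length above is clamped to what the file actually contains: offset comes from the page index and len is at most PAGE_SIZE but never past i_size. A standalone sketch of that clamping, assuming 4 KiB pages and adding an explicit guard for an offset past EOF (the function above relies on the caller never asking for such a page):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                    /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static size_t fetch_len(uint64_t i_size, uint64_t page_index)
{
	uint64_t offset = page_index << PAGE_SHIFT;
	uint64_t left = i_size > offset ? i_size - offset : 0;

	return left < PAGE_SIZE ? (size_t)left : (size_t)PAGE_SIZE;
}

int main(void)
{
	/* 10000-byte file: page 0 is full, page 2 only holds 10000 - 8192 bytes */
	printf("page 0 -> %zu bytes\n", fetch_len(10000, 0));
	printf("page 2 -> %zu bytes\n", fetch_len(10000, 2));
	return 0;
}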
Example #4
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);	/* drop the op lock taken above before bailing out */
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA, true);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
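
f2fs_convert_inline_data only performs the conversion once the file would outgrow the inline area (to_size > MAX_INLINE_DATA). A toy sketch of that decision; the MAX_INLINE_DATA value here is an assumed placeholder, the real one depends on the on-disk inode layout:

#include <stdio.h>
#include <stdbool.h>

/* assumed value for illustration; the real one is derived from the inode block layout */
#define MAX_INLINE_DATA 3488

static bool needs_conversion(bool has_inline, unsigned long to_size)
{
	/* keep data inline while it still fits inside the inode block */
	return has_inline && to_size > MAX_INLINE_DATA;
}

int main(void)
{
	printf("grow to 1000: %d\n", needs_conversion(true, 1000));   /* stays inline */
	printf("grow to 5000: %d\n", needs_conversion(true, 5000));   /* convert      */
	return 0;
}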
Example #5
/*
 * Read a directory, using filldir to fill the dirent memory.
 * smb_proc_readdir does the actual reading from the smb server.
 *
 * The cache code is almost directly taken from ncpfs
 */
static int 
smb_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *dir = dentry->d_inode;
	struct smb_sb_info *server = server_from_dentry(dentry);
	union  smb_dir_cache *cache = NULL;
	struct smb_cache_control ctl;
	struct page *page = NULL;
	int result;

	ctl.page  = NULL;
	ctl.cache = NULL;

	VERBOSE("reading %s/%s, f_pos=%d\n",
		DENTRY_PATH(dentry),  (int) filp->f_pos);

	result = 0;

	lock_kernel();

	switch ((unsigned int) filp->f_pos) {
	case 0:
		if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0)
			goto out;
		filp->f_pos = 1;
		/* fallthrough */
	case 1:
		if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR) < 0)
			goto out;
		filp->f_pos = 2;
	}

	/*
	 * Make sure our inode is up-to-date.
	 */
	result = smb_revalidate_inode(dentry);
	if (result)
		goto out;


	page = grab_cache_page(&dir->i_data, 0);
	if (!page)
		goto read_really;

	ctl.cache = cache = kmap(page);
	ctl.head  = cache->head;

	if (!PageUptodate(page) || !ctl.head.eof) {
		VERBOSE("%s/%s, page uptodate=%d, eof=%d\n",
			 DENTRY_PATH(dentry), PageUptodate(page),ctl.head.eof);
		goto init_cache;
	}

	if (filp->f_pos == 2) {
		if (jiffies - ctl.head.time >= SMB_MAX_AGE(server))
			goto init_cache;

		/*
		 * N.B. ncpfs checks mtime of dentry too here, we don't.
		 *   1. common smb servers do not update mtime on dir changes
		 *   2. it requires an extra smb request
		 *      (revalidate has the same timeout as ctl.head.time)
		 *
		 * Instead smbfs invalidates its own cache on local changes
		 * and remote changes are not seen until timeout.
		 */
	}

	if (filp->f_pos > ctl.head.end)
		goto finished;

	ctl.fpos = filp->f_pos + (SMB_DIRCACHE_START - 2);
	ctl.ofs  = ctl.fpos / SMB_DIRCACHE_SIZE;
	ctl.idx  = ctl.fpos % SMB_DIRCACHE_SIZE;

	for (;;) {
		if (ctl.ofs != 0) {
			ctl.page = find_lock_page(&dir->i_data, ctl.ofs);
			if (!ctl.page)
				goto invalid_cache;
			ctl.cache = kmap(ctl.page);
			if (!PageUptodate(ctl.page))
				goto invalid_cache;
		}
		while (ctl.idx < SMB_DIRCACHE_SIZE) {
			struct dentry *dent;
			int res;

			dent = smb_dget_fpos(ctl.cache->dentry[ctl.idx],
					     dentry, filp->f_pos);
			if (!dent)
				goto invalid_cache;

			res = filldir(dirent, dent->d_name.name,
				      dent->d_name.len, filp->f_pos,
				      dent->d_inode->i_ino, DT_UNKNOWN);
			dput(dent);
			if (res)
				goto finished;
			filp->f_pos += 1;
			ctl.idx += 1;
			if (filp->f_pos > ctl.head.end)
				goto finished;
		}
		if (ctl.page) {
			kunmap(ctl.page);
			SetPageUptodate(ctl.page);
			unlock_page(ctl.page);
			page_cache_release(ctl.page);
			ctl.page = NULL;
		}
		ctl.idx  = 0;
		ctl.ofs += 1;
	}
invalid_cache:
	if (ctl.page) {
		kunmap(ctl.page);
		unlock_page(ctl.page);
		page_cache_release(ctl.page);
		ctl.page = NULL;
	}
	ctl.cache = cache;
init_cache:
	smb_invalidate_dircache_entries(dentry);
	ctl.head.time = jiffies;
	ctl.head.eof = 0;
	ctl.fpos = 2;
	ctl.ofs = 0;
	ctl.idx = SMB_DIRCACHE_START;
	ctl.filled = 0;
	ctl.valid  = 1;
read_really:
	result = server->ops->readdir(filp, dirent, filldir, &ctl);
	if (result == -ERESTARTSYS && page)
		ClearPageUptodate(page);
	if (ctl.idx == -1)
		goto invalid_cache;	/* retry */
	ctl.head.end = ctl.fpos - 1;
	ctl.head.eof = ctl.valid;
finished:
	if (page) {
		cache->head = ctl.head;
		kunmap(page);
		if (result != -ERESTARTSYS)
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (ctl.page) {
		kunmap(ctl.page);
		SetPageUptodate(ctl.page);
		unlock_page(ctl.page);
		page_cache_release(ctl.page);
	}
out:
	unlock_kernel();
	return result;
}
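
The dircache lookup above maps a directory position to a (cache page, slot) pair: fpos = f_pos + (SMB_DIRCACHE_START - 2), then a divide/modulo by SMB_DIRCACHE_SIZE. A standalone sketch of that indexing with assumed values for the two constants (the real ones come from the smbfs headers):

#include <stdio.h>

#define SMB_DIRCACHE_START 3     /* assumed for illustration                      */
#define SMB_DIRCACHE_SIZE  512   /* assumed: dentry pointers per cache page       */

int main(void)
{
	unsigned long f_pos = 100;                       /* hypothetical readdir position */
	unsigned long fpos = f_pos + (SMB_DIRCACHE_START - 2);
	unsigned long ofs = fpos / SMB_DIRCACHE_SIZE;    /* which cache page  */
	unsigned long idx = fpos % SMB_DIRCACHE_SIZE;    /* slot in that page */

	printf("f_pos=%lu -> page %lu, slot %lu\n", f_pos, ofs, idx);
	return 0;
}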
Example #6
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}
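
When the faulting page straddles EOF, f2fs_vm_page_mkwrite zeroes everything past i_size so stale data is never exposed: offset = i_size & ~PAGE_CACHE_MASK, then the tail up to the page size is cleared. A standalone sketch of that tail zeroing, assuming 4 KiB pages and a hypothetical 10000-byte file:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SHIFT 12                    /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned char page[PAGE_SIZE];
	uint64_t i_size = 10000;             /* hypothetical file size  */
	uint64_t index = 2;                  /* page that straddles EOF */

	memset(page, 0xaa, sizeof(page));    /* pretend it holds stale data */

	if (((index + 1) << PAGE_SHIFT) > i_size) {
		unsigned offset = i_size & ~PAGE_MASK;   /* 10000 % 4096 = 1808 */
		memset(page + offset, 0, PAGE_SIZE - offset);
	}

	printf("byte 1807=%#x byte 1808=%#x\n", page[1807], page[1808]);
	return 0;
}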
Example #7
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	sector_t bi_sector = bio->bi_sector;	/* save before bio_put() may free the bio */
	int i;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		int partial_write = 0;

		head = page_buffers(page);
		if (error)
			SetPageError(page);
		BUG_ON(!head);
		if (head->b_size == PAGE_CACHE_SIZE)
			clear_buffer_dirty(head);
		else {
			loff_t offset;
			loff_t io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset)) {
					if (error)
						buffer_io_error(bh);

					clear_buffer_dirty(bh);
				}
				if (buffer_delay(bh))
					partial_write = 1;
				else if (!buffer_mapped(bh))
					clear_buffer_dirty(bh);
				else if (buffer_dirty(bh))
					partial_write = 1;
				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		/*
		 * If this is a partial write which happened to make
		 * all buffers uptodate then we can optimize away a
		 * bogus readpage() for the next read(). Here we
		 * 'discover' whether the page went uptodate as a
		 * result of this (potentially partial) write.
		 */
		if (!partial_write)
			SetPageUptodate(page);

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	/* Add the io_end to per-inode completed io list*/
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}
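
ext4_end_bio walks the buffer_heads of each page and only clears the dirty bit on buffers that fall entirely inside the byte range covered by this io_end; anything else stays dirty and makes the write "partial". A standalone sketch of that containment check, with assumed page/buffer sizes and a hypothetical io_end range:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                     /* assumed: 4 KiB pages   */
#define BLOCK_SIZE 1024                   /* assumed: 1 KiB buffers */

int main(void)
{
	uint64_t page_index = 5;
	uint64_t io_start = 21504, io_len = 2048;          /* hypothetical io_end range */
	uint64_t offset = page_index << PAGE_SHIFT;        /* first byte of the page    */
	int i;

	for (i = 0; i < (1 << PAGE_SHIFT) / BLOCK_SIZE; i++) {
		int inside = offset >= io_start &&
			     offset + BLOCK_SIZE <= io_start + io_len;
		printf("buffer at %llu: %s\n", (unsigned long long)offset,
		       inside ? "written by this bio" : "left dirty");
		offset += BLOCK_SIZE;
	}
	return 0;
}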
Example #8
static int wrapfs_readpage(struct file *file, struct page *page){

    int err;
    struct file *lower_file;
    struct inode *inode;
    mm_segment_t old_fs;
    char *page_data = NULL;
    mode_t orig_mode;
    char *decrypted_data = NULL;
    struct wrapfs_sb_info *sbi = NULL;
    size_t page_len = (size_t)PAGE_CACHE_SIZE;
    
    
    DEBUGMSG("INSIDE READPAGE!!");
    sbi = (struct wrapfs_sb_info*)file->f_path.dentry->d_sb->s_fs_info;
    DEBUGMSG("KEY IN READPAGE IS BELOW");
    DEBUGMSG(sbi->sb_key);
    

    //For decryption
    decrypted_data = kmalloc(PAGE_CACHE_SIZE + PAD, GFP_KERNEL);
    if(!decrypted_data){
        ERR;
        err = -ENOMEM;  /* kmalloc() returns NULL on failure, never an ERR_PTR */
        goto out;
    }
    memset(decrypted_data, 0, PAGE_CACHE_SIZE + PAD);

    /* Commented: 
    wrapfs_read_lock(file->f_path.dentry->d_sb, UNIONFS_SMUTEX_PARENT);
    err = wrapfs_file_revalidate(file, false);
    if(unlikely(err)){
        goto out;
    }
    wrapfs_check_file(file);
    */

    lower_file = wrapfs_lower_file(file);
    /* FIXME: is this assertion right here? */
    BUG_ON(lower_file == NULL);
    inode = file->f_path.dentry->d_inode;
 
    page_data = (char *)kmap(page);
    /*
    * Use vfs_read because some lower file systems don't have a
    * readpage method, and some file systems (esp. distributed ones)
    * don't like their pages to be accessed directly.  Using vfs_read
    * may be a little slower, but a lot safer, as the VFS does a lot of
    * the necessary magic for us.
    */
    lower_file->f_pos = page_offset(page);

    old_fs = get_fs();
    set_fs(KERNEL_DS);

    /*
    * generic_file_splice_write may call us on a file not opened for
    * reading, so temporarily allow reading.
    */
    orig_mode = lower_file->f_mode;
    lower_file->f_mode |= FMODE_READ;

#ifdef WRAPFS_CRYPTO
    //For Decryption
    if(sbi->sb_key != NULL){
        DEBUGMSG("Reading Decrypted Data");
        err = vfs_read(lower_file, decrypted_data, PAGE_CACHE_SIZE + PAD, &lower_file->f_pos);
    }
    else{
#endif
        DEBUGMSG("Reading Normal Data");
        err = vfs_read(lower_file, page_data, PAGE_CACHE_SIZE, &lower_file->f_pos);
#ifdef WRAPFS_CRYPTO
    }

    
    //For Decryption
    if(sbi->sb_key != NULL){
        DEBUGMSG("Performing Decryption");
        ceph_aes_decrypt(sbi->sb_key, 16, page_data, &page_len, decrypted_data, err);
    }
    else{
        DEBUGMSG("Not Performing Decryption");
    }
#endif
    lower_file->f_mode = orig_mode;
    
    set_fs(old_fs);
    if (err >= 0 && err < PAGE_CACHE_SIZE)
        memset(page_data + err, 0, PAGE_CACHE_SIZE - err);
    
    kunmap(page);
            
    if (err < 0)
        goto out;
    err = 0;

    fsstack_copy_attr_times(inode, lower_file->f_path.dentry->d_inode);
    flush_dcache_page(page);
                                                                       
out:
    kfree(decrypted_data);  /* allocated above; kfree(NULL) is a no-op */
    if (err == 0)
        SetPageUptodate(page);
    else
        ClearPageUptodate(page);
                                                                                  
    unlock_page(page);
    /*Commented: 
    unionfs_check_file(file);

    unionfs_read_unlock(file->f_path.dentry->d_sb);
    */
    return err;         
}
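
Because vfs_read can return fewer than PAGE_CACHE_SIZE bytes near EOF, wrapfs_readpage clears the rest of the page buffer before marking it uptodate. A standalone sketch of that short-read handling, with a hypothetical read count:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096                    /* assumed: 4 KiB pages */

int main(void)
{
	char page_data[PAGE_SIZE];
	int err;

	memset(page_data, 'x', sizeof(page_data));   /* stale contents                       */
	err = 1500;                                  /* pretend vfs_read returned 1500 bytes */

	/* same tail handling as wrapfs_readpage above */
	if (err >= 0 && err < PAGE_SIZE)
		memset(page_data + err, 0, PAGE_SIZE - err);

	printf("byte 1499=%c byte 1500=%d\n", page_data[1499], page_data[1500]);
	return 0;
}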
Example #9
/**
 * ecryptfs_readpage
 * @file: An eCryptfs file
 * @page: Page from eCryptfs inode mapping into which to stick the read data
 *
 * Read in a page, decrypting if necessary.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_readpage(struct file *file, struct page *page)
{
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
#ifdef CONFIG_CRYPTO_DEV_KFIPS
	struct ecryptfs_page_crypt_req *page_crypt_req = NULL;
#endif
	int rc = 0;

	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
						      PAGE_CACHE_SIZE,
						      page->mapping->host);
	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
			rc = ecryptfs_copy_up_encrypted_with_header(page,
								    crypt_stat);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to copy "
				       "the encrypted content from the lower "
				       "file whilst inserting the metadata "
				       "from the xattr into the header; rc = "
				       "[%d]\n", __func__, rc);
				goto out;
			}

		} else {
			rc = ecryptfs_read_lower_page_segment(
				page, page->index, 0, PAGE_CACHE_SIZE,
				page->mapping->host);
			if (rc) {
				printk(KERN_ERR "Error reading page; rc = "
				       "[%d]\n", rc);
				goto out;
			}
		}
	} else {
#ifndef CONFIG_CRYPTO_DEV_KFIPS
		rc = ecryptfs_decrypt_page(page);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error decrypting page; "
					"rc = [%d]\n", rc);
#else
		page_crypt_req = ecryptfs_alloc_page_crypt_req(
					page, ecryptfs_readpage_complete);
		if (!page_crypt_req) {
			rc = -ENOMEM;
			ecryptfs_printk(KERN_ERR,
					"Failed to allocate page crypt request "
					"for decryption\n");
#endif
			goto out;
		}
#ifdef CONFIG_CRYPTO_DEV_KFIPS
		ecryptfs_decrypt_page_async(page_crypt_req);
		goto out_async_started;
#endif
	}
out:
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	if (rc)
#else
	if (unlikely(rc))
#endif
		ClearPageUptodate(page);
	else
		SetPageUptodate(page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
			page->index);
	unlock_page(page);
#ifdef CONFIG_CRYPTO_DEV_KFIPS
out_async_started:
#endif
	return rc;
}

/**
 * Called with lower inode mutex held.
 */
static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
{
	struct inode *inode = page->mapping->host;
	int end_byte_in_page;

	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
		goto out;
	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
	if (to > end_byte_in_page)
		end_byte_in_page = to;
	zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
out:
	return 0;
}

/**
 * ecryptfs_write_begin
 * @file: The eCryptfs file
 * @mapping: The eCryptfs object
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused)
 *
 * This function must zero any hole we create
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	loff_t prev_page_end_size;
	int rc = 0;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
	if (!PageUptodate(page)) {
		struct ecryptfs_crypt_stat *crypt_stat =
			&ecryptfs_inode_to_private(mapping->host)->crypt_stat;

		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
			rc = ecryptfs_read_lower_page_segment(
				page, index, 0, PAGE_CACHE_SIZE, mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attemping to read "
				       "lower page segment; rc = [%d]\n",
				       __func__, rc);
				ClearPageUptodate(page);
				goto out;
			} else
				SetPageUptodate(page);
		} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
			if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
				rc = ecryptfs_copy_up_encrypted_with_header(
					page, crypt_stat);
				if (rc) {
					printk(KERN_ERR "%s: Error attempting "
					       "to copy the encrypted content "
					       "from the lower file whilst "
					       "inserting the metadata from "
					       "the xattr into the header; rc "
					       "= [%d]\n", __func__, rc);
					ClearPageUptodate(page);
					goto out;
				}
				SetPageUptodate(page);
			} else {
				rc = ecryptfs_read_lower_page_segment(
					page, index, 0, PAGE_CACHE_SIZE,
					mapping->host);
				if (rc) {
					printk(KERN_ERR "%s: Error reading "
					       "page; rc = [%d]\n",
					       __func__, rc);
					ClearPageUptodate(page);
					goto out;
				}
				SetPageUptodate(page);
			}
		} else {
			if (prev_page_end_size
			    >= i_size_read(page->mapping->host)) {
				zero_user(page, 0, PAGE_CACHE_SIZE);
			} else {
				rc = ecryptfs_decrypt_page(page);
				if (rc) {
					printk(KERN_ERR "%s: Error decrypting "
					       "page at index [%ld]; "
					       "rc = [%d]\n",
					       __func__, page->index, rc);
					ClearPageUptodate(page);
					goto out;
				}
			}
			SetPageUptodate(page);
		}
	}
	/* If creating a page or more of holes, zero them out via truncate.
	 * Note, this will increase i_size. */
	if (index != 0) {
		if (prev_page_end_size > i_size_read(page->mapping->host)) {
			rc = ecryptfs_truncate(file->f_path.dentry,
					       prev_page_end_size);
			if (rc) {
				printk(KERN_ERR "%s: Error on attempt to "
				       "truncate to (higher) offset [%lld];"
				       " rc = [%d]\n", __func__,
				       prev_page_end_size, rc);
				goto out;
			}
		}
	}
	/* Writing to a new page, and creating a small hole from start
	 * of page?  Zero it out. */
	if ((i_size_read(mapping->host) == prev_page_end_size)
	    && (pos != 0))
		zero_user(page, 0, PAGE_CACHE_SIZE);
out:
	if (unlikely(rc)) {
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
	}
	return rc;
}
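
ecryptfs_write_begin detects hole creation by comparing prev_page_end_size (the byte offset where the target page begins) against i_size; if the page starts beyond EOF, the file is first extended by truncate so the hole is zeroed. A standalone sketch of that decision, assuming 4 KiB pages and hypothetical sizes:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                     /* assumed: 4 KiB pages */

int main(void)
{
	uint64_t i_size = 5000;               /* hypothetical current file size */
	uint64_t index = 4;                   /* page being written             */
	uint64_t prev_page_end_size = index << PAGE_SHIFT;   /* 16384 */

	/* writing page 4 of a 5000-byte file creates a hole over pages 1-3 */
	if (index != 0 && prev_page_end_size > i_size)
		printf("truncate (extend) to %llu before writing\n",
		       (unsigned long long)prev_page_end_size);
	else
		printf("no hole to zero\n");
	return 0;
}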
Example #10
/*
 * AFS read page from file, directory or symlink
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct afs_vnode *vnode;
	struct inode *inode;
	struct key *key;
	size_t len;
	off_t offset;
	int ret;

	inode = page->mapping->host;

	ASSERT(file != NULL);
	key = file->private_data;
	ASSERT(key != NULL);

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	vnode = AFS_FS_I(inode);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

#ifdef AFS_CACHING_SUPPORT
	/* is it cached? */
	ret = cachefs_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* read BIO submitted and wb-journal entry found */
	case 1:
		BUG(); // TODO - handle wb-journal match

		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* no page available in cache */
	case -ENOBUFS:
	case -ENODATA:
	default:
		offset = page->index << PAGE_CACHE_SHIFT;
		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}
#ifdef AFS_CACHING_SUPPORT
			cachefs_uncache_page(vnode->cache, page);
#endif
			goto error;
		}

		SetPageUptodate(page);

#ifdef AFS_CACHING_SUPPORT
		if (cachefs_write_page(vnode->cache,
				       page,
				       afs_file_readpage_write_complete,
				       NULL,
				       GFP_KERNEL) != 0
		    ) {
			cachefs_uncache_page(vnode->cache, page);
			unlock_page(page);
		}
#else
		unlock_page(page);
#endif
	}

	_leave(" = 0");
	return 0;

error:
	SetPageError(page);
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}
Example #11
/*
 * ecryptfs_readpages
 *
 * Read in multiple pages and decrypt them if necessary.
 */
static int ecryptfs_readpages(struct file *filp, struct address_space *mapping,
			      struct list_head *pages, unsigned nr_pages)
{
	struct ecryptfs_crypt_stat *crypt_stat =
	    		&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
	struct page **pgs = NULL;
	unsigned int page_idx = 0;
	int rc = 0;
	int nodec = 0;	//no decryption needed flag
	/* u32 sz = 0;  */

	if (!crypt_stat
	    || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
	    || (crypt_stat->flags & ECRYPTFS_NEW_FILE)
	    || (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED)) {
	    nodec = 1;
	}

	if (!nodec) {
	    /* sz = __ilog2_u32((u32)__roundup_pow_of_two(nr_pages*sizeof(struct page*))); */
	    /* pgs = (struct page **)__get_free_pages(GFP_KERNEL, sz); */
	    pgs = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	    if (!pgs) {
		return -ENOMEM;
	    }
	}

	/* printk("[g-ecryptfs] Info: in read_pages read %d pages %d: \n", nr_pages, nodec); */
	/* dump_stack(); */

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
	    struct page *page = list_entry(pages->prev, struct page, lru);
	    list_del(&page->lru);
	    if (add_to_page_cache_lru(page, mapping, page->index, GFP_KERNEL)) {
			printk("[g-eCryptfs] INFO: cannot add page %lu to cache lru\n",
			       (unsigned long)(page->index));
	    } else {
		if (nodec)
		    rc |= ecryptfs_readpage(filp, page);
	    }

	    if (nodec)
			page_cache_release(page);
	    else
			pgs[page_idx] = page;
	}

	if (!nodec) {
	    rc = ecryptfs_decrypt_pages(pgs, nr_pages);

	    for (page_idx = 0; page_idx < nr_pages; page_idx++) {

		if (rc)
		    ClearPageUptodate(pgs[page_idx]);
		else
		    SetPageUptodate(pgs[page_idx]);
		unlock_page(pgs[page_idx]);

		page_cache_release(pgs[page_idx]);
	    }

	    kfree(pgs);
	    /* free_pages((unsigned long)pgs, sz); */
	}

	return 0;
}
Example #12
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)

{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(page, pages, 0);
	if (actor == NULL)
		goto out;

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			put_page(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages, this page has either
		 * been VM reclaimed, but others are still in the page cache
		 * and uptodate, or we're racing with another thread in
		 * squashfs_readpage also trying to grab them.  Fall back to
		 * using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
								page);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;

	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	kfree(actor);
	kfree(page);

	return 0;

mark_errored:
	/* Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		put_page(page[i]);
	}

out:
	kfree(actor);
	kfree(page);
	return res;
}
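
The mask arithmetic at the top of squashfs_readpage_block maps the requested page onto the whole group of page-cache pages covered by one compressed block. A standalone sketch of that grouping, assuming 4 KiB pages and a 128 KiB block size (block_log = 17):

#include <stdio.h>

#define PAGE_SHIFT 12                     /* assumed: 4 KiB pages            */
#define BLOCK_LOG  17                     /* assumed: 128 KiB squashfs block */

int main(void)
{
	int index = 70;                               /* page the VM asked for       */
	int mask = (1 << (BLOCK_LOG - PAGE_SHIFT)) - 1;   /* 31: pages per block - 1 */
	int start_index = index & ~mask;              /* first page of the block     */
	int end_index = start_index | mask;           /* last page of the block      */

	printf("pages %d-%d share one compressed block (%d pages)\n",
	       start_index, end_index, end_index - start_index + 1);
	return 0;
}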
Example #13
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		goto out;
	}

	SetPageUptodate(page);

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (likely(PageSwapCache(page))) {
		struct swap_info_struct *sis;

		sis = page_swap_info(page);
		if (sis->flags & SWP_BLKDEV) {
			/*
			 * The swap subsystem performs lazy swap slot freeing,
			 * expecting that the page will be swapped out again.
			 * So we can avoid an unnecessary write if the page
			 * isn't redirtied.
			 * This is good for real swap storage because we can
			 * reduce unnecessary I/O and enhance wear-leveling
			 * if an SSD is used as the swap device.
			 * But if in-memory swap device (eg zram) is used,
			 * this causes a duplicated copy between uncompressed
			 * data in VM-owned memory and compressed data in
			 * zram-owned memory.  So let's free zram-owned memory
			 * and make the VM-owned decompressed page *dirty*,
			 * so the page should be swapped out somewhere again if
			 * we again wish to reclaim it.
			 */
			struct gendisk *disk = sis->bdev->bd_disk;
			if (disk->fops->swap_slot_free_notify) {
				swp_entry_t entry;
				unsigned long offset;

				entry.val = page_private(page);
				offset = swp_offset(entry);

				SetPageDirty(page);
				disk->fops->swap_slot_free_notify(sis->bdev,
						offset);
			}
		}
	}

out:
	unlock_page(page);
	bio_put(bio);
}
Example #14
// corresponds to hammer_vop_strategy_read
int hammerfs_readpage(struct file *file, struct page *page) 
{
    void *page_addr;
    hammer_mount_t hmp;
    struct buffer_head *bh;
    struct super_block *sb;
    struct hammer_transaction trans;
    struct hammer_cursor cursor;
    struct inode *inode;
    struct hammer_inode *ip;
    hammer_base_elm_t base;
    hammer_off_t disk_offset;
    int64_t rec_offset;
    int64_t file_offset;
    int error = 0;
    int boff;
    int roff;
    int n;
    int i=0;
    int block_num;
    int block_offset;
    int bytes_read;
    int64_t sb_offset;
    hammer_off_t zone2_offset;
    int vol_no;
    hammer_volume_t volume;

    printk ("hammerfs_readpage(page->index=%d)\n", (int) page->index);

    inode = file->f_path.dentry->d_inode;
    ip = (struct hammer_inode *)inode->i_private;
    sb = inode->i_sb;
    hmp = (hammer_mount_t)sb->s_fs_info;
    hammer_simple_transaction(&trans, ip->hmp);
    hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
    file_offset = page->index * PAGE_SIZE;

    if (file_offset > inode->i_size) {
        error = -ENOSPC;
        goto done;
    }

    SetPageUptodate (page);
    page_addr = kmap (page);

    if(!page_addr) {
        error = -ENOSPC;
        goto failed;
    }

   /*
    * Key range (begin and end inclusive) to scan.  Note that the key's
    * stored in the actual records represent BASE+LEN, not BASE.  The
    * first record containing bio_offset will have a key > bio_offset.
    */
    cursor.key_beg.localization = ip->obj_localization +
                                  HAMMER_LOCALIZE_MISC;
    cursor.key_beg.obj_id = ip->obj_id;
    cursor.key_beg.create_tid = 0;
    cursor.key_beg.delete_tid = 0;
    cursor.key_beg.obj_type = 0;
    cursor.key_beg.key = file_offset + 1;
    cursor.asof = ip->obj_asof;
    cursor.flags |= HAMMER_CURSOR_ASOF;

    cursor.key_end = cursor.key_beg;
    KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

    cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
    cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
    cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
    cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

    error = hammer_ip_first(&cursor);
    boff = 0;

    while(error == 0) {
       /*
        * Get the base file offset of the record.  The key for
    * data records is (base + bytes) rather than (base).  The
        */
        base = &cursor.leaf->base;
        rec_offset = base->key - cursor.leaf->data_len;

       /*
        * Calculate the gap, if any, and zero-fill it.
        *
        * n is the offset of the start of the record versus our
        * current seek offset in the bio.
        */
        n = (int)(rec_offset - (file_offset + boff));
        if (n > 0) {
            if (n > PAGE_SIZE - boff)
                n = PAGE_SIZE - boff;
            bzero((char *)page_addr + boff, n);
            boff += n;
            n = 0;
        }

       /*
        * Calculate the data offset in the record and the number
        * of bytes we can copy.
        *
        * There are two degenerate cases.  First, boff may already
        * be at bp->b_bufsize.  Secondly, the data offset within
        * the record may exceed the record's size.
        */
        roff = -n;
        rec_offset += roff;
        n = cursor.leaf->data_len - roff;
        if (n <= 0) {
            printk("hammerfs_readpage: bad n=%d roff=%d\n", n, roff);
            n = 0;
        } else if (n > PAGE_SIZE - boff) {
            n = PAGE_SIZE - boff;
        }

       /*
        * Deal with cached truncations.  This cool bit of code
        * allows truncate()/ftruncate() to avoid having to sync
        * the file.
        *
        * If the frontend is truncated then all backend records are
        * subject to the frontend's truncation.
        *
        * If the backend is truncated then backend records on-disk
        * (but not in-memory) are subject to the backend's
        * truncation.  In-memory records owned by the backend
        * represent data written after the truncation point on the
        * backend and must not be truncated.
        *
        * Truncate operations deal with frontend buffer cache
        * buffers and frontend-owned in-memory records synchronously.
        */
       if (ip->flags & HAMMER_INODE_TRUNCATED) {
               if (hammer_cursor_ondisk(&cursor) ||
                   cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
                       if (ip->trunc_off <= rec_offset)
                               n = 0;
                       else if (ip->trunc_off < rec_offset + n)
                               n = (int)(ip->trunc_off - rec_offset);
               }
       }
       if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
               if (hammer_cursor_ondisk(&cursor)) {
                       if (ip->sync_trunc_off <= rec_offset)
                               n = 0;
                       else if (ip->sync_trunc_off < rec_offset + n)
                               n = (int)(ip->sync_trunc_off - rec_offset);
               }
       }

       /*
        * Calculate the data offset in the record and the number
        * of bytes we can copy.
        */
        disk_offset = cursor.leaf->data_offset + roff;

        // move this to hammerfs_direct_io_read
        zone2_offset = hammer_blockmap_lookup(hmp, disk_offset, &error);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);

        // n is the number of bytes we should read, sb_offset the
        // offset on disk
        sb_offset = volume->ondisk->vol_buf_beg + (zone2_offset & HAMMER_OFF_SHORT_MASK);

        while(n > 0 && boff != PAGE_SIZE) {
            block_num = sb_offset / BLOCK_SIZE;
            block_offset = sb_offset % BLOCK_SIZE;

            // the minimum between what is available and what we can maximally provide
            bytes_read = min(BLOCK_SIZE - (int )block_offset, PAGE_SIZE - (int )boff);        

            bh = sb_bread(sb, block_num + i);
            if(!bh) {
                error = -ENOMEM;
                goto failed;
            }
            memcpy((char*)page_addr + roff, (char*)bh->b_data + boff + block_offset, bytes_read);
            brelse(bh);

            n -= bytes_read;
            boff += bytes_read;
            roff += bytes_read;
        }

       /*
        * Iterate until we have filled the request.
        */
        if (boff == PAGE_SIZE)
            break;
        error = hammer_ip_next(&cursor);
    }

    hammer_done_cursor(&cursor);
    hammer_done_transaction(&trans);

failed:
    if (PageLocked (page))
        unlock_page (page);
    kunmap (page);
done:
    return error;
}
Example #15
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
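
The extent-cache hit above turns a logical page index directly into a block address: data_blkaddr = ei.blk + (index - ei.fofs). A toy standalone sketch of that lookup; the struct and values here are illustrative, not f2fs's own definitions:

#include <stdio.h>

/* toy extent: logical pages [fofs, fofs+len) map to physical blocks starting at blk */
struct extent_info { unsigned fofs, blk, len; };

static int lookup(const struct extent_info *ei, unsigned index, unsigned *blkaddr)
{
	if (index < ei->fofs || index >= ei->fofs + ei->len)
		return 0;                       /* not covered by this extent */
	*blkaddr = ei->blk + (index - ei->fofs);
	return 1;
}

int main(void)
{
	struct extent_info ei = { .fofs = 10, .blk = 5000, .len = 8 };
	unsigned blkaddr;

	if (lookup(&ei, 13, &blkaddr))
		printf("page 13 -> block %u\n", blkaddr);   /* 5000 + 3 = 5003 */
	return 0;
}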
Example #16
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	int rc;
#else
	struct ecryptfs_page_crypt_req *page_crypt_req;
	int rc = 0;
#endif
#if 1 // FEATURE_SDCARD_ENCRYPTION
	struct inode *ecryptfs_inode;
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
	ecryptfs_inode = page->mapping->host;
#endif

	/*
	 * Refuse to write the page out if we are called from reclaim context
	 * since our writepage() path may potentially allocate memory when
	 * calling into the lower fs vfs_write() which may in turn invoke
	 * us again.
	 */
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
#ifndef CONFIG_CRYPTO_DEV_KFIPS
		rc = 0;
#endif
		goto out;
	}

#if 1 // FEATURE_SDCARD_ENCRYPTION
	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		ecryptfs_printk(KERN_DEBUG,
				"Passing through unencrypted page\n");
		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
			0, PAGE_CACHE_SIZE);
		if (rc) {
			ClearPageUptodate(page);
			goto out;
		}
		SetPageUptodate(page);
	} else {
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
#else
//	rc = ecryptfs_encrypt_page(page);
//	if (rc) {
//		ecryptfs_printk(KERN_WARNING, "Error encrypting "
//				"page (upper index [0x%.16lx])\n", page->index);
//		ClearPageUptodate(page);
	page_crypt_req = ecryptfs_alloc_page_crypt_req(
				page, ecryptfs_writepage_complete);
	if (unlikely(!page_crypt_req)) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR,
				"Failed to allocate page crypt request "
				"for encryption\n");
#endif
		goto out;
	}
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	SetPageUptodate(page);
#else
//	SetPageUptodate(page);
	set_page_writeback(page);
	ecryptfs_encrypt_page_async(page_crypt_req);
#endif
	}
#else
	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
#endif
out:
	unlock_page(page);
	return rc;
}

static void strip_xattr_flag(char *page_virt,
			     struct ecryptfs_crypt_stat *crypt_stat)
{
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
		size_t written;

		crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
		ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
						&written);
		crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
	}
}

/**
 *   Header Extent:
 *     Octets 0-7:        Unencrypted file size (big-endian)
 *     Octets 8-15:       eCryptfs special marker
 *     Octets 16-19:      Flags
 *      Octet 16:         File format version number (between 0 and 255)
 *      Octets 17-18:     Reserved
 *      Octet 19:         Bit 1 (lsb): Reserved
 *                        Bit 2: Encrypted?
 *                        Bits 3-8: Reserved
 *     Octets 20-23:      Header extent size (big-endian)
 *     Octets 24-25:      Number of header extents at front of file
 *                        (big-endian)
 *     Octet  26:         Begin RFC 2440 authentication token packet set
 */

/**
 * ecryptfs_copy_up_encrypted_with_header
 * @page: Sort of a ``virtual'' representation of the encrypted lower
 *        file. The actual lower file does not have the metadata in
 *        the header. This is locked.
 * @crypt_stat: The eCryptfs inode's cryptographic context
 *
 * The ``view'' is the version of the file that userspace winds up
 * seeing, with the header information inserted.
 */
static int
ecryptfs_copy_up_encrypted_with_header(struct page *page,
				       struct ecryptfs_crypt_stat *crypt_stat)
{
	loff_t extent_num_in_page = 0;
	loff_t num_extents_per_page = (PAGE_CACHE_SIZE
				       / crypt_stat->extent_size);
	int rc = 0;

	while (extent_num_in_page < num_extents_per_page) {
		loff_t view_extent_num = ((((loff_t)page->index)
					   * num_extents_per_page)
					  + extent_num_in_page);
		size_t num_header_extents_at_front =
			(crypt_stat->metadata_size / crypt_stat->extent_size);

		if (view_extent_num < num_header_extents_at_front) {
			/* This is a header extent */
			char *page_virt;

			page_virt = kmap_atomic(page);
			memset(page_virt, 0, PAGE_CACHE_SIZE);
			/* TODO: Support more than one header extent */
			if (view_extent_num == 0) {
				size_t written;

				rc = ecryptfs_read_xattr_region(
					page_virt, page->mapping->host);
				strip_xattr_flag(page_virt + 16, crypt_stat);
				ecryptfs_write_header_metadata(page_virt + 20,
							       crypt_stat,
							       &written);
			}
			kunmap_atomic(page_virt);
			flush_dcache_page(page);
			if (rc) {
				printk(KERN_ERR "%s: Error reading xattr "
				       "region; rc = [%d]\n", __func__, rc);
				goto out;
			}
		} else {
			/* This is an encrypted data extent */
			loff_t lower_offset =
				((view_extent_num * crypt_stat->extent_size)
				 - crypt_stat->metadata_size);

			rc = ecryptfs_read_lower_page_segment(
				page, (lower_offset >> PAGE_CACHE_SHIFT),
				(lower_offset & ~PAGE_CACHE_MASK),
				crypt_stat->extent_size, page->mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to read "
				       "extent at offset [%lld] in the lower "
				       "file; rc = [%d]\n", __func__,
				       lower_offset, rc);
				goto out;
			}
		}
		extent_num_in_page++;
	}
out:
	return rc;
}
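
The loop above works in "view" extents: extent numbers below the header count are synthesized from the xattr metadata, and the rest are read from the lower file at view_extent_num * extent_size - metadata_size. A standalone sketch of that offset mapping with assumed extent and metadata sizes:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE     4096u               /* assumed: 4 KiB pages         */
#define EXTENT_SIZE   4096u               /* assumed: one extent per page */
#define METADATA_SIZE 8192u               /* assumed: two header extents  */

int main(void)
{
	uint64_t page_index;
	unsigned extents_per_page = PAGE_SIZE / EXTENT_SIZE;
	unsigned header_extents = METADATA_SIZE / EXTENT_SIZE;

	for (page_index = 0; page_index < 4; page_index++) {
		uint64_t view_extent = page_index * extents_per_page;

		if (view_extent < header_extents)
			printf("page %llu: synthesized header extent\n",
			       (unsigned long long)page_index);
		else
			printf("page %llu: data from lower offset %llu\n",
			       (unsigned long long)page_index,
			       (unsigned long long)(view_extent * EXTENT_SIZE - METADATA_SIZE));
	}
	return 0;
}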
Example #17
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0, false))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				struct page *cpage;

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				cpage = find_lock_page(
						META_MAPPING(F2FS_I_SB(inode)),
						block_nr);
				if (cpage) {
					f2fs_wait_on_page_writeback(cpage,
									DATA);
					f2fs_put_page(cpage, 1);
				}
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}
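
/*
 * Hedged sketch of the caller side: f2fs trees of this vintage typically
 * wire the helper above into the .readpage/.readpages address_space
 * operations roughly as below.  Tracepoints are elided and the inline-data
 * helpers named here may differ between versions.
 */
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	/* Inline-data inodes are served from the inode block instead. */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	/* Readahead makes no sense for inline-data inodes. */
	if (f2fs_has_inline_data(mapping->host))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}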
Exemplo n.º 18
0
static int squashfs_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int index = page->index << PAGE_CACHE_SHIFT;
	u64 block = squashfs_i(inode)->start;
	int offset = squashfs_i(inode)->offset;
	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
	int bytes, copied;
	void *pageaddr;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
			"%llx, offset %x\n", page->index, block, offset);

	/*
	 * Skip index bytes into symlink metadata.
	 */
	if (index) {
		bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
								index);
		if (bytes < 0) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			goto error_out;
		}
	}

	/*
	 * Read length bytes from symlink metadata.  Squashfs_read_metadata
	 * is not used here because it can sleep and we want to use
	 * kmap_atomic to map the page.  Instead call the underlying
	 * squashfs_cache_get routine.  As length bytes may overlap metadata
	 * blocks, we may need to call squashfs_cache_get multiple times.
	 */
	for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
		entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
		if (entry->error) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			squashfs_cache_put(entry);
			goto error_out;
		}

		pageaddr = kmap_atomic(page, KM_USER0);
		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
								length - bytes);
		if (copied == length - bytes)
			memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
		else
			block = entry->next_index;
		kunmap_atomic(pageaddr, KM_USER0);
		squashfs_cache_put(entry);
	}

	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

error_out:
	SetPageError(page);
	unlock_page(page);
	return 0;
}
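
/*
 * Usage note (hedged): squashfs exposes the routine above as the symlink
 * mapping's readpage hook through an address_space_operations table,
 * along the following lines.
 */
const struct address_space_operations squashfs_symlink_aops = {
	.readpage = squashfs_symlink_readpage
};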
Exemplo n.º 19
0
/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs largest possible bios, submits them for IO if the
 * blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block)
{
	struct _inode *inode = tx_cache_get_inode(page->mapping->host);
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			clear_buffer_mapped(map_bh);
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}
	
		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_page(page, first_hole << blkbits,
				PAGE_CACHE_SIZE - (first_hole << blkbits),
				KM_USER0);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
			  	min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}
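
	/*
	 * The listing above is truncated here.  What follows is a hedged
	 * sketch of how stock fs/mpage.c finishes this routine -- add the
	 * mapped run to the bio, submit on boundaries, and fall back to
	 * block_read_full_page() on the confused path.  The exact tail in
	 * this patched tree may differ.
	 */
	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}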
Exemplo n.º 20
0
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_node_frag *frag = f->fraglist;
	__u32 offset = pg->index << PAGE_CACHE_SHIFT;
	__u32 end = offset + PAGE_CACHE_SIZE;
	unsigned char *pg_buf;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset));

	if (!PageLocked(pg))
		PAGE_BUG(pg);

	while(frag && frag->ofs + frag->size  <= offset) {
		//		D1(printk(KERN_DEBUG "skipping frag %d-%d; before the region we care about\n", frag->ofs, frag->ofs + frag->size));
		frag = frag->next;
	}

	pg_buf = kmap(pg);

	/* XXX FIXME: Where a single physical node actually shows up in two
	   frags, we read it twice. Don't do that. */
	/* Now we're pointing at the first frag which overlaps our page */
	while(offset < end) {
		D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end));
		if (!frag || frag->ofs > offset) {
			__u32 holesize = end - offset;
			if (frag) {
				D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset));
				holesize = min(holesize, frag->ofs - offset);
				D1(jffs2_print_frag_list(f));
			}
			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
			memset(pg_buf, 0, holesize);
			pg_buf += holesize;
			offset += holesize;
			continue;
		} else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) {
			D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
				  inode->i_ino, frag->ofs, offset));
			D1(jffs2_print_frag_list(f));
			memset(pg_buf, 0, end - offset);
			ClearPageUptodate(pg);
			SetPageError(pg);
			kunmap(pg);
			return -EIO;
		} else if (!frag->node) {
			__u32 holeend = min(end, frag->ofs + frag->size);
			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
			memset(pg_buf, 0, holeend - offset);
			pg_buf += holeend - offset;
			offset = holeend;
			frag = frag->next;
			continue;
		} else {
			__u32 readlen;
			__u32 fragofs; /* offset within the frag to start reading */

			fragofs = offset - frag->ofs;
			readlen = min(frag->size - fragofs, end - offset);
			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs, 
				  fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3));
			ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen);
			D2(printk(KERN_DEBUG "node read done\n"));
			if (ret) {
				D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret));
				memset(pg_buf, 0, readlen);
				ClearPageUptodate(pg);
				SetPageError(pg);
				kunmap(pg);
				return ret;
			}
		
			pg_buf += readlen;
			offset += readlen;
			frag = frag->next;
			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
		}
	}
	D2(printk(KERN_DEBUG "readpage finishing\n"));
	SetPageUptodate(pg);
	ClearPageError(pg);

	flush_dcache_page(pg);

	kunmap(pg);
	D1(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
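
/*
 * Context (hedged sketch): the _nolock suffix exists because the write
 * path calls this with the page already locked, while the regular
 * readpage entry point pairs it with an unlock, roughly as below.  In
 * trees as old as this one the unlock is spelled UnlockPage(); newer
 * trees use unlock_page().
 */
int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
{
	int ret = jffs2_do_readpage_nolock(inode, pg);
	UnlockPage(pg);
	return ret;
}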
Exemplo n.º 21
0
int j4fs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
	struct inode *inode;
	unsigned long end_index;
	char *buffer;
	int nWritten = 0;
	unsigned nBytes;
	j4fs_ctrl ctl;
	int nErr;

	if(j4fs_panic==1) {
		T(J4FS_TRACE_ALWAYS,("%s %d: j4fs panic\n",__FUNCTION__,__LINE__));
		return -ENOSPC;
	}

	T(J4FS_TRACE_FS,("%s %d\n",__FUNCTION__,__LINE__));

	if (!mapping) BUG();

	inode = mapping->host;

	if (!inode) BUG();

	if (offset > inode->i_size) {
		T(J4FS_TRACE_FS,
			("j4fs_writepage at %08x, inode size = %08x!!!\n",
			(unsigned)(page->index << PAGE_CACHE_SHIFT),
			(unsigned)inode->i_size));
		T(J4FS_TRACE_FS,
			("                -> don't care!!\n"));
		unlock_page(page);
		return 0;
	}

	end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	/* easy case */
	if (page->index < end_index)
		nBytes = PAGE_CACHE_SIZE;
	else
		nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);

	get_page(page);

	buffer = kmap(page);

	j4fs_GrossLock();

	T(J4FS_TRACE_FS,
		("j4fs_writepage: index=%08x,nBytes=%08x,inode.i_size=%05x\n", (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes,(int)inode->i_size));

	// write file
	ctl.buffer=buffer;
	ctl.count=nBytes;
	ctl.id=inode->i_ino;
	ctl.index=offset;

	nErr=fsd_write(&ctl);

	if(nErr==J4FS_RETRY_WRITE) nErr=fsd_write(&ctl);

	T(J4FS_TRACE_FS,
		("j4fs_writepage: index=%08x,nBytes=%08x,inode.i_size=%05x\n", (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes,(int)inode->i_size));

	j4fs_GrossUnlock();

	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	put_page(page);

	return (nWritten == nBytes) ? 0 : -ENOSPC;

}
Exemplo n.º 22
0
int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	__u32 newsize = max_t(__u32, filp->f_dentry->d_inode->i_size, (pg->index << PAGE_CACHE_SHIFT) + end);
	__u32 file_ofs = (pg->index << PAGE_CACHE_SHIFT);
	__u32 writelen = min((__u32)PAGE_CACHE_SIZE, newsize - file_ofs);
	struct jffs2_raw_inode *ri;
	int ret = 0;
	ssize_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	if (!start && end == PAGE_CACHE_SIZE) {
		/* We need to avoid deadlock with page_cache_read() in
		   jffs2_garbage_collect_pass(). So we have to mark the
		   page up to date, to prevent page_cache_read() from 
		   trying to re-lock it. */
		SetPageUptodate(pg);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	while(writelen) {
		struct jffs2_full_dnode *fn;
		unsigned char *comprbuf = NULL;
		unsigned char comprtype = JFFS2_COMPR_NONE;
		__u32 phys_ofs, alloclen;
		__u32 datalen, cdatalen;

		D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, file_ofs));

		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
		if (ret) {
			SetPageError(pg);
			D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
			break;
		}
		down(&f->sem);
		datalen = writelen;
		cdatalen = min(alloclen - sizeof(*ri), writelen);

		comprbuf = kmalloc(cdatalen, GFP_KERNEL);
		if (comprbuf) {
			comprtype = jffs2_compress(page_address(pg)+ (file_ofs & (PAGE_CACHE_SIZE-1)), comprbuf, &datalen, &cdatalen);
		}
		if (comprtype == JFFS2_COMPR_NONE) {
			/* Either compression failed, or the allocation of comprbuf failed */
			if (comprbuf)
				kfree(comprbuf);
			comprbuf = page_address(pg) + (file_ofs & (PAGE_CACHE_SIZE -1));
			datalen = cdatalen;
		}
		/* Now comprbuf points to the data to be written, be it compressed or not.
		   comprtype holds the compression type, and comprtype == JFFS2_COMPR_NONE means
		   that the comprbuf doesn't need to be kfree()d. 
		*/

		ri->magic = JFFS2_MAGIC_BITMASK;
		ri->nodetype = JFFS2_NODETYPE_INODE;
		ri->totlen = sizeof(*ri) + cdatalen;
		ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);

		ri->ino = inode->i_ino;
		ri->version = ++f->highest_version;
		ri->mode = inode->i_mode;
		ri->uid = inode->i_uid;
		ri->gid = inode->i_gid;
		ri->isize = max((__u32)inode->i_size, file_ofs + datalen);
		ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;
		ri->offset = file_ofs;
		ri->csize = cdatalen;
		ri->dsize = datalen;
		ri->compr = comprtype;
		ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
		ri->data_crc = crc32(0, comprbuf, cdatalen);

		fn = jffs2_write_dnode(inode, ri, comprbuf, cdatalen, phys_ofs, NULL);

		jffs2_complete_reservation(c);

		if (comprtype != JFFS2_COMPR_NONE)
			kfree(comprbuf);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			SetPageError(pg);
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		up(&f->sem);
		if (ret) {
			/* Eep */
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			SetPageError(pg);
			break;
		}
		inode->i_size = ri->isize;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		inode->i_ctime = inode->i_mtime = ri->ctime;
		if (!datalen) {
			printk(KERN_WARNING "Eep. We didn't actually write any bloody data\n");
			ret = -EIO;
			SetPageError(pg);
			break;
		}
		D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
		writtenlen += datalen;
		file_ofs += datalen;
		writelen -= datalen;
	}

	jffs2_free_raw_inode(ri);

	if (writtenlen < end) {
		/* generic_file_write has written more to the page cache than we've
		   actually written to the medium. Mark the page !Uptodate so that 
		   it gets reread */
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
		SetPageError(pg);
		ClearPageUptodate(pg);
	}
	if (writtenlen <= start) {
		/* We didn't even get to the start of the affected part */
		ret = ret?ret:-ENOSPC;
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Only %x bytes written to page. start (%x) not reached, returning %d\n", writtenlen, start, ret));
	}
	writtenlen = min(end-start, writtenlen-start);

	D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d. nrpages is %ld\n",writtenlen?writtenlen:ret, inode->i_mapping->nrpages));
	return writtenlen?writtenlen:ret;
}
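
/*
 * Hedged illustration of the comprbuf ownership rule spelled out above:
 * a buffer only needs to be kfree()d when the compressor actually
 * produced it; with JFFS2_COMPR_NONE, comprbuf aliases the page data
 * itself and must not be freed.  The helper name is made up here.
 */
static inline void jffs2_writebuf_release(unsigned char *comprbuf,
					  unsigned char comprtype)
{
	if (comprtype != JFFS2_COMPR_NONE)
		kfree(comprbuf);
}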
Exemplo n.º 23
0
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
				entry->length, dst, &dlen);
		kunmap_atomic(dst);
		zpool_unmap_handle(zswap_pool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for the entry here:
	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
	 * (2) refcount is 0: the entry was freed and removed from the tree
	 *     because an invalidation happened during writeback;
	 *     search the tree and free the entry only if it is still found
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently.  It is safe not to free the entry here,
	 * and even if the entry is freed by the following put it is still
	 * okay to return a non-zero value.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}
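
/*
 * Hedged sketch of how the writeback path above is hooked up: zswap hands
 * it to the allocator as the pool's eviction callback, so that when the
 * zpool wants to reclaim space it "resumes" the intercepted swap
 * writeback.  Exact structure names vary across kernel versions.
 */
static struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};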
Exemplo n.º 24
0
/**
 * write_pages - write block of data to device via the page cache
 * @dev: device to write to
 * @buf: data source or NULL if erase (output is set to 0xff)
 * @to: offset into output device
 * @len: amount of data to write
 * @retlen: amount of data written
 *
 * Grab pages from the page cache and fill them with the source data.
 * A non-page-aligned start or end results in a read-in of the page, with
 * only part of the page being modified.  Pages are added to the bio and
 * then written out.
 */
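/*
 * Hedged worked example of the split described above (illustrative only,
 * assuming 4 KiB pages): a write of len = 10000 at to = 5000 gives
 * pagenr = 1 and offset = 904; start_len = 4096 - 904 = 3192, then
 * end_len = (10000 - 3192) & ~PAGE_MASK = 2712, leaving exactly one
 * whole page in the middle, so pagecnt = 1 + 1 + 1 = 3.
 */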
static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
		    size_t len, size_t *retlen)
{
	int pagenr, offset;
	size_t start_len = 0, end_len;
	int pagecnt = 0;
	int err = 0;
	struct bio *bio = NULL;
	size_t thislen = 0;

	pagenr = to >> PAGE_SHIFT;
	offset = to & ~PAGE_MASK;

	DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
	      buf, (long)to, len, pagenr, offset);

	/* see if we have to do a partial write at the start */
	if(offset) {
		start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
		len -= start_len;
	}

	/* calculate the length of the other two regions */
	end_len = len & ~PAGE_MASK;
	len -= end_len;

	if(start_len)
		pagecnt++;

	if(len)
		pagecnt += len >> PAGE_SHIFT;

	if(end_len)
		pagecnt++;

	down(&dev->wrbuf_mutex);

	DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
	      start_len, len, end_len, pagecnt);

	if(start_len) {
		/* do partial start region */
		struct page *page;

		DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
		      pagenr, start_len, offset);

		BUG_ON(!buf);
		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
		lock_page(page);
		if(PageDirty(page)) {
			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
			    to, start_len, len, end_len, pagenr);
			BUG();
		}
		memcpy(page_address(page)+offset, buf, start_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		buf += start_len;
		thislen = start_len;
		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
		if(!bio) {
			err = -ENOMEM;
			err("bio_add_page failed\n");
			goto write_err;
		}
		pagecnt--;
		pagenr++;
	}

	/* Now do the main loop to a page aligned, n page sized output */
	if(len) {
		int pagesc = len >> PAGE_SHIFT;
		DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
		      pagenr, pagesc);
		while(pagesc) {
			struct page *page;

			/* see if page is in the page cache */
			DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
			page = grab_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr);
			if(!page) {
				warn("write: cannot grab cache page %d", pagenr);
				err = -ENOMEM;
				goto write_err;
			}
			/* a dirty page here would mean someone wrote it behind our back */
			if(PageDirty(page)) {
				BUG();
			}
			if(!buf) {
				memset(page_address(page), 0xff, PAGE_SIZE);
			} else {
				memcpy(page_address(page), buf, PAGE_SIZE);
				buf += PAGE_SIZE;
			}
			bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
			if(!bio) {
				err = -ENOMEM;
				err("bio_add_page failed\n");
				goto write_err;
			}
			pagenr++;
			pagecnt--;
			SetPageDirty(page);
			SetPageUptodate(page);
			pagesc--;
			thislen += PAGE_SIZE;
		}
	}

	if(end_len) {
		/* do the third region */
		struct page *page;
		DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
		      pagenr, end_len);
		BUG_ON(!buf);
		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
		lock_page(page);
		if(PageDirty(page)) {
			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
			    to, start_len, len, end_len, pagenr);
			BUG();
		}
		memcpy(page_address(page), buf, end_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		DEBUG(3, "blkmtd: write: writing out partial end\n");
		thislen += end_len;
		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
		if(!bio) {
			err = -ENOMEM;
			err("bio_add_page failed\n");
			goto write_err;
		}
		pagenr++;
	}

	DEBUG(3, "blkmtd: write: got %d vectors to write\n", bio->bi_vcnt);
 write_err:
	if(bio)
		blkmtd_write_out(bio);

	DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
	up(&dev->wrbuf_mutex);

	if(retlen)
		*retlen = thislen;
	return err;
}
Exemplo n.º 25
0
static int ext4_destroy_inline_data_nolock(handle_t *handle,
					   struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = 0, },
	};
	struct ext4_xattr_info i = {
		.name_index = EXT4_XATTR_INDEX_SYSTEM,
		.name = EXT4_XATTR_SYSTEM_DATA,
		.value = NULL,
		.value_len = 0,
	};
	int error;

	if (!ei->i_inline_off)
		return 0;

	error = ext4_get_inode_loc(inode, &is.iloc);
	if (error)
		return error;

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto out;

	BUFFER_TRACE(is.iloc.bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, is.iloc.bh);
	if (error)
		goto out;

	error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
	if (error)
		goto out;

	memset((void *)ext4_raw_inode(&is.iloc)->i_block,
		0, EXT4_MIN_INLINE_DATA_SIZE);

	if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				      EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		if (S_ISDIR(inode->i_mode) ||
		    S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}
	ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);

	get_bh(is.iloc.bh);
	error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);

	EXT4_I(inode)->i_inline_off = 0;
	EXT4_I(inode)->i_inline_size = 0;
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
out:
	brelse(is.iloc.bh);
	if (error == -ENODATA)
		error = 0;
	return error;
}

static int ext4_read_inline_page(struct inode *inode, struct page *page)
{
	void *kaddr;
	int ret = 0;
	size_t len;
	struct ext4_iloc iloc;

	BUG_ON(!PageLocked(page));
	BUG_ON(!ext4_has_inline_data(inode));
	BUG_ON(page->index);

	if (!EXT4_I(inode)->i_inline_off) {
		ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
			     inode->i_ino);
		goto out;
	}

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		goto out;

	len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
	kaddr = kmap_atomic(page);
	ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	zero_user_segment(page, len, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	brelse(iloc.bh);

out:
	return ret;
}
Exemplo n.º 26
0
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(page, dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}
Exemplo n.º 27
0
Arquivo: cache.c Projeto: 274914765/C
/*
 * Create dentry/inode for this file and add it to the dircache.
 */
int
smb_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
           struct smb_cache_control *ctrl, struct qstr *qname,
           struct smb_fattr *entry)
{
    struct dentry *newdent, *dentry = filp->f_path.dentry;
    struct inode *newino, *inode = dentry->d_inode;
    struct smb_cache_control ctl = *ctrl;
    int valid = 0;
    int hashed = 0;
    ino_t ino = 0;

    qname->hash = full_name_hash(qname->name, qname->len);

    if (dentry->d_op && dentry->d_op->d_hash)
        if (dentry->d_op->d_hash(dentry, qname) != 0)
            goto end_advance;

    newdent = d_lookup(dentry, qname);

    if (!newdent) {
        newdent = d_alloc(dentry, qname);
        if (!newdent)
            goto end_advance;
    } else {
        hashed = 1;
        memcpy((char *) newdent->d_name.name, qname->name,
               newdent->d_name.len);
    }

    if (!newdent->d_inode) {
        smb_renew_times(newdent);
        entry->f_ino = iunique(inode->i_sb, 2);
        newino = smb_iget(inode->i_sb, entry);
        if (newino) {
            smb_new_dentry(newdent);
            d_instantiate(newdent, newino);
            if (!hashed)
                d_rehash(newdent);
        }
    } else
        smb_set_inode_attr(newdent->d_inode, entry);

    if (newdent->d_inode) {
        ino = newdent->d_inode->i_ino;
        newdent->d_fsdata = (void *) ctl.fpos;
        smb_new_dentry(newdent);
    }

    if (ctl.idx >= SMB_DIRCACHE_SIZE) {
        if (ctl.page) {
            kunmap(ctl.page);
            SetPageUptodate(ctl.page);
            unlock_page(ctl.page);
            page_cache_release(ctl.page);
        }
        ctl.cache = NULL;
        ctl.idx  -= SMB_DIRCACHE_SIZE;
        ctl.ofs  += 1;
        ctl.page  = grab_cache_page(&inode->i_data, ctl.ofs);
        if (ctl.page)
            ctl.cache = kmap(ctl.page);
    }
    if (ctl.cache) {
        ctl.cache->dentry[ctl.idx] = newdent;
        valid = 1;
    }
    dput(newdent);

end_advance:
    if (!valid)
        ctl.valid = 0;
    if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
        if (!ino)
            ino = find_inode_number(dentry, qname);
        if (!ino)
            ino = iunique(inode->i_sb, 2);
        ctl.filled = filldir(dirent, qname->name, qname->len,
                     filp->f_pos, ino, DT_UNKNOWN);
        if (!ctl.filled)
            filp->f_pos += 1;
    }
    ctl.fpos += 1;
    ctl.idx  += 1;
    *ctrl = ctl;
    return (ctl.valid || !ctl.filled);
}
Exemplo n.º 28
0
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static ssize_t check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int seg, i;
	size_t size;
	unsigned long addr;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		return -EINVAL;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
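
/*
 * Hedged sketch of the caller: older f2fs trees validate alignment with
 * check_direct_IO() and then hand the request to the generic blockdev
 * direct-IO helper, roughly as below.  The block-mapping callback name
 * (get_data_block here) is an assumption and differs across versions.
 */
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  get_data_block);
}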
Exemplo n.º 29
0
/* mm->page_table_lock is held. mmap_sem is not held */
static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page, zone_t * classzone)
{
	pte_t pte;
	swp_entry_t entry;

	/* Don't look at this pte if it's been accessed recently. */
	if ((vma->vm_flags & VM_LOCKED) || ptep_test_and_clear_young(page_table)) {
		mark_page_accessed(page);
		return 0;
	}

	/* Don't bother unmapping pages that are active */
	if (PageActive(page))
		return 0;

	/* Don't bother replenishing zones not under pressure.. */
	if (!memclass(page_zone(page), classzone))
		return 0;

	if (TryLockPage(page))
		return 0;

	/* From this point on, the odds are that we're going to
	 * nuke this pte, so read and clear the pte.  This hook
	 * is needed on CPUs which update the accessed and dirty
	 * bits in hardware.
	 */
	flush_cache_page(vma, address);
	pte = ptep_get_and_clear(page_table);
	flush_tlb_page(vma, address);

	if (pte_dirty(pte))
		set_page_dirty(page);

	/*
	 * Is the page already in the swap cache? If so, then
	 * we can just drop our reference to it without doing
	 * any IO - it's already up-to-date on disk.
	 */
	if (PageSwapCache(page)) {
		entry.val = page->index;
		swap_duplicate(entry);
set_swap_pte:
		set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
		mm->rss--;
#ifdef __arm__
		memc_clear(vma->vm_mm, page);
#endif
		UnlockPage(page);
		{
			int freeable = page_count(page) - !!page->buffers <= 2;
			page_cache_release(page);
			return freeable;
		}
	}

	/*
	 * Is it a clean page? Then it must be recoverable
	 * by just paging it in again, and we can just drop
	 * it..  or if it's dirty but has backing store,
	 * just mark the page dirty and drop it.
	 *
	 * However, this won't actually free any real
	 * memory, as the page will just be in the page cache
	 * somewhere, and as such we should just continue
	 * our scan.
	 *
	 * Basically, this just makes it possible for us to do
	 * some real work in the future in "refill_inactive()".
	 */
	if (page->mapping)
		goto drop_pte;
	if (!PageDirty(page))
		goto drop_pte;

	/*
	 * Anonymous buffercache pages can be left behind by
	 * concurrent truncate and pagefault.
	 */
	if (page->buffers)
		goto preserve;

	/*
	 * This is a dirty, swappable page.  First of all,
	 * get a suitable swap entry for it, and make sure
	 * we have the swap cache set up to associate the
	 * page with that swap entry.
	 */
	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			break;
		/* Add it to the swap cache and mark it dirty
		 * (adding to the page cache will clear the dirty
		 * and uptodate bits, so we need to do it again)
		 */
		if (add_to_swap_cache(page, entry) == 0) {
			SetPageUptodate(page);
			set_page_dirty(page);
			goto set_swap_pte;
		}
		/* Raced with "speculative" read_swap_cache_async */
		swap_free(entry);
	}

	/* No swap space left */
preserve:
	set_pte(page_table, pte);
	UnlockPage(page);
	return 0;
}
Exemplo n.º 30
0
Arquivo: rd.c Projeto: nhanh0/hah
static int rd_blkdev_pagecache_IO(int rw, struct buffer_head * sbh, int minor)
{
	struct address_space * mapping;
	unsigned long index;
	int offset, size, err;

	err = 0;
	mapping = rd_bdev[minor]->bd_inode->i_mapping;

	index = sbh->b_rsector >> (PAGE_CACHE_SHIFT - 9);
	offset = (sbh->b_rsector << 9) & ~PAGE_CACHE_MASK;
	size = sbh->b_size;

	do {
		int count;
		struct page ** hash;
		struct page * page;
		char * src, * dst;
		int unlock = 0;

		count = PAGE_CACHE_SIZE - offset;
		if (count > size)
			count = size;
		size -= count;

		hash = page_hash(mapping, index);
		page = __find_get_page(mapping, index, hash);
		if (!page) {
			page = grab_cache_page(mapping, index);
			err = -ENOMEM;
			if (!page)
				goto out;
			err = 0;

			if (!Page_Uptodate(page)) {
				memset(kmap(page), 0, PAGE_CACHE_SIZE);
				kunmap(page);
				SetPageUptodate(page);
			}

			unlock = 1;
		}

		index++;

		if (rw == READ) {
			src = kmap(page);
			src += offset;
			dst = bh_kmap(sbh);
		} else {
			dst = kmap(page);
			dst += offset;
			src = bh_kmap(sbh);
		}
		offset = 0;

		memcpy(dst, src, count);

		kunmap(page);
		bh_kunmap(sbh);

		if (rw == READ) {
			flush_dcache_page(page);
		} else {
			SetPageDirty(page);
		}
		if (unlock)
			UnlockPage(page);
		__free_page(page);
	} while (size);

 out:
	return err;
}