Example #1
int __microfs_readpage(struct file* file, struct page* page)
{
	struct inode* inode = page->mapping->host;
	struct super_block* sb = inode->i_sb;
	struct microfs_sb_info* sbi = MICROFS_SB(sb);
	
	int err = 0;
	int small_blks = sbi->si_blksz <= PAGE_SIZE;
	
	__u32 i;
	__u32 j;
	
	__u32 data_offset = 0;
	__u32 data_length = 0;
	__u32 blk_data_offset = 0;
	__u32 blk_data_length = 0;
	
	__u32 pgholes = 0;
	
	__u32 blk_ptrs = i_blks(i_size_read(inode), sbi->si_blksz);
	__u32 blk_nr = small_blks
		? page->index * (PAGE_SIZE >> sbi->si_blkshift)
		: page->index / (sbi->si_blksz / PAGE_SIZE);
	
	int index_mask = small_blks
		? 0
		: (1 << (sbi->si_blkshift - PAGE_SHIFT)) - 1;
	
	__u32 max_index = i_blks(i_size_read(inode), PAGE_SIZE);
	__u32 start_index = (small_blks ? page->index : page->index & ~index_mask);
	__u32 end_index = (small_blks ? page->index : start_index | index_mask) + 1;
	
	struct microfs_readpage_request rdreq;
	
	if (end_index > max_index)
		end_index = max_index;
	
	pr_spam("__microfs_readpage: sbi->si_blksz=%u, blk_ptrs=%u, blk_nr=%u\n",
		sbi->si_blksz, blk_ptrs, blk_nr);
	pr_spam("__microfs_readpage: start_index=%u, end_index=%u, max_index=%u\n",
		start_index, end_index, max_index);
	
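	/* Walk the block pointers that cover this page and add up the
	 * compressed extent that must be read. With blocks smaller than
	 * PAGE_SIZE several pointers are needed; otherwise a single block
	 * covers the page and the loop body runs only once.
	 */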
	mutex_lock(&sbi->si_metadata_blkptrbuf.d_mutex);
	for (i = 0; (data_length < PAGE_SIZE && blk_nr + i < blk_ptrs) &&
			(i == 0 || sbi->si_blksz < PAGE_SIZE); ++i) {
		err = __microfs_find_block(sb, inode, blk_ptrs, blk_nr + i,
			&blk_data_offset, &blk_data_length);
		if (unlikely(err)) {
			mutex_unlock(&sbi->si_metadata_blkptrbuf.d_mutex);
			goto err_find_block;
		}
		if (!data_offset)
			data_offset = blk_data_offset;
		data_length += blk_data_length;
	}
	mutex_unlock(&sbi->si_metadata_blkptrbuf.d_mutex);
	
	pr_spam("__microfs_readpage: data_offset=0x%x, data_length=%u\n",
		data_offset, data_length);
	
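	/* rr_bhoffset is the byte offset of the compressed data within the
	 * first PAGE_SIZE chunk read from the image; rr_npages is the number
	 * of page cache pages covered by this read.
	 */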
	rdreq.rr_bhoffset = data_offset - (data_offset & PAGE_MASK);
	rdreq.rr_npages = end_index - start_index;
	rdreq.rr_pages = kmalloc(rdreq.rr_npages * sizeof(void*), GFP_KERNEL);
	if (!rdreq.rr_pages) {
		pr_err("__microfs_readpage: failed to allocate rdreq.rr_pages (%u slots)\n",
			rdreq.rr_npages);
		err = -ENOMEM;
		goto err_mem;
	}
	
	pr_spam("__microfs_readpage: rdreq.rr_pages=0x%p, rdreq.rr_npages=%u\n",
		rdreq.rr_pages, rdreq.rr_npages);
	
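	/* Grab every page cache page covered by the block(s). Pages that are
	 * busy or already up to date are skipped and counted as holes, which
	 * later forces the exceptional (buffered) read path.
	 */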
	for (i = 0, j = start_index; j < end_index; ++i, ++j) {
		rdreq.rr_pages[i] = (j == page->index)
			? page
			: grab_cache_page_nowait(page->mapping, j);
		if (rdreq.rr_pages[i] == page) {
			pr_spam("__microfs_readpage: target page 0x%p at index %u\n",
				page, j);
		} else if (rdreq.rr_pages[i] == NULL) {
			pgholes++;
			pr_spam("__microfs_readpage: busy page at index %u\n", j);
		} else if (PageUptodate(rdreq.rr_pages[i])) {
			unlock_page(rdreq.rr_pages[i]);
			put_page(rdreq.rr_pages[i]);
			rdreq.rr_pages[i] = NULL;
			pgholes++;
			pr_spam("__microfs_readpage: page up to date at index %u\n", j);
		} else {
			pr_spam("__microfs_readpage: new page 0x%p added for index %u\n",
				rdreq.rr_pages[i], j);
		}
	}
	
	pr_spam("__microfs_readpage: pgholes=%u\n", pgholes);
	
	if (pgholes) {
		/* It seems that one or more pages have been reclaimed, but
		 * it is also possible that another thread is trying to read
		 * the same data.
		 */
		err = __microfs_read_blks(sb, page->mapping, &rdreq,
			__microfs_recycle_filedata_exceptionally,
			__microfs_copy_filedata_exceptionally,
			data_offset, data_length);
	} else {
		/* It is possible to uncompress the file data directly into
		 * the page cache. Neat.
		 */
		err = __microfs_read_blks(sb, page->mapping, &rdreq,
			__microfs_recycle_filedata_nominally,
			__microfs_copy_filedata_nominally,
			data_offset, data_length);
	}
	if (unlikely(err)) {
		pr_err("__microfs_readpage: __microfs_read_blks failed\n");
		goto err_io;
	}
	
	for (i = 0; i < rdreq.rr_npages; ++i) {
		if (rdreq.rr_pages[i]) {
			flush_dcache_page(rdreq.rr_pages[i]);
			SetPageUptodate(rdreq.rr_pages[i]);
			unlock_page(rdreq.rr_pages[i]);
			if (rdreq.rr_pages[i] != page)
				put_page(rdreq.rr_pages[i]);
		}
	}
	
	kfree(rdreq.rr_pages);
	
	return 0;
	
err_io:
	pr_spam("__microfs_readpage: failure\n");
	for (i = 0; i < rdreq.rr_npages; ++i) {
		if (rdreq.rr_pages[i]) {
			flush_dcache_page(rdreq.rr_pages[i]);
			SetPageError(rdreq.rr_pages[i]);
			unlock_page(rdreq.rr_pages[i]);
			if (rdreq.rr_pages[i] != page)
				put_page(rdreq.rr_pages[i]);
		}
	}
	kfree(rdreq.rr_pages);
err_mem:
	/* Fall-through. */
err_find_block:
	return err;
}
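For orientation, a helper like __microfs_readpage is normally hooked up as the readpage callback in the filesystem's address_space_operations. The fragment below is only an illustrative sketch: the names microfs_readpage and microfs_aops are assumptions, and the contract for unlocking the target page on failure is glossed over.

/* Illustrative sketch; names are assumed and error handling is simplified. */
static int microfs_readpage(struct file* file, struct page* page)
{
	return __microfs_readpage(file, page);
}

static const struct address_space_operations microfs_aops = {
	.readpage = microfs_readpage,
};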
Example #2
static int ll_write_begin(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned flags,
                          struct page **pagep, void **fsdata)
{
    struct ll_cl_context *lcc;
    const struct lu_env  *env = NULL;
    struct cl_io   *io;
    struct cl_page *page = NULL;

    struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
    pgoff_t index = pos >> PAGE_SHIFT;
    struct page *vmpage = NULL;
    unsigned from = pos & (PAGE_SIZE - 1);
    unsigned to = from + len;
    int result = 0;
    ENTRY;

    CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);

    lcc = ll_cl_find(file);
    if (lcc == NULL) {
        io = NULL;
        GOTO(out, result = -EIO);
    }

    env = lcc->lcc_env;
    io  = lcc->lcc_io;

    /* To avoid deadlock, try to lock page first. */
    vmpage = grab_cache_page_nowait(mapping, index);

    if (unlikely(vmpage == NULL ||
                 PageDirty(vmpage) || PageWriteback(vmpage))) {
        struct vvp_io *vio = vvp_env_io(env);
        struct cl_page_list *plist = &vio->u.write.vui_queue;

        /* If the page is already in the dirty cache, we have to commit
         * the pages right now; otherwise, it may cause a deadlock
         * because it holds the page lock of a dirty page and requests
         * more grants. It's okay for the dirty page to be the first
         * one in the commit page list, though. */
        if (vmpage != NULL && plist->pl_nr > 0) {
            unlock_page(vmpage);
            put_page(vmpage);
            vmpage = NULL;
        }

        /* commit pages and then wait for page lock */
        result = vvp_io_write_commit(env, io);
        if (result < 0)
            GOTO(out, result);

        if (vmpage == NULL) {
            vmpage = grab_cache_page_write_begin(mapping, index,
                                                 flags);
            if (vmpage == NULL)
                GOTO(out, result = -ENOMEM);
        }
    }

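    /* Find (or create) the cl_page that backs this VM page so the CLIO
     * layers can track it for the coming write. */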
    page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
    if (IS_ERR(page))
        GOTO(out, result = PTR_ERR(page));

    lcc->lcc_page = page;
    lu_ref_add(&page->cp_reference, "cl_io", io);

    cl_page_assume(env, io, page);
    if (!PageUptodate(vmpage)) {
        /*
         * We're completely overwriting an existing page,
         * so _don't_ set it up to date until commit_write
         */
        if (from == 0 && to == PAGE_SIZE) {
            CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
            POISON_PAGE(vmpage, 0x11);
        } else {
            /* TODO: can be optimized at OSC layer to check if it
             * is a lockless IO. In that case, it's not necessary
             * to read the data. */
            result = ll_prepare_partial_page(env, io, page);
            if (result == 0)
                SetPageUptodate(vmpage);
        }
    }
    if (result < 0)
        cl_page_unassume(env, io, page);
    EXIT;
out:
    if (result < 0) {
        if (vmpage != NULL) {
            unlock_page(vmpage);
            put_page(vmpage);
        }
        if (!IS_ERR_OR_NULL(page)) {
            lu_ref_del(&page->cp_reference, "cl_io", io);
            cl_page_put(env, page);
        }
        if (io)
            io->ci_result = result;
    } else {
        *pagep = vmpage;
        *fsdata = lcc;
    }
    RETURN(result);
}
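In the VFS, ->write_begin is paired with a ->write_end callback in the same address_space_operations table: write_begin locks and prepares the page, the generic write path copies the user data into it, and write_end commits the range. The fragment below is only a sketch of that wiring; ll_write_end is assumed to be the counterpart and does not appear in the listing above.

/* Sketch of the usual pairing; ll_write_end is assumed, not shown above. */
static const struct address_space_operations ll_aops_sketch = {
    .write_begin = ll_write_begin,
    .write_end   = ll_write_end,
    /* ... .readpage, .writepage and friends elided ... */
};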
Example #3
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

	page = kmalloc(sizeof(void *) * pages, GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(page, pages, 0);
	if (actor == NULL)
		goto out;

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			page_cache_release(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages, this page has either
		 * been VM reclaimed, but others are still in the page cache
		 * and uptodate, or we're racing with another thread in
		 * squashfs_readpage also trying to grab them.  Fall back to
		 * using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
								page);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;

	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_CACHE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			page_cache_release(page[i]);
	}

	kfree(actor);
	kfree(page);

	return 0;

mark_errored:
	/* Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		page_cache_release(page[i]);
	}

out:
	kfree(actor);
	kfree(page);
	return res;
}
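This is the variant that decompresses a datablock straight into the page cache, falling back to squashfs_read_cache when some of the pages cannot be grabbed. For orientation, a call site would look roughly like the sketch below; squashfs_readpage_sketch is a made-up name, and the real caller also handles fragment blocks, sparse blocks, and reads past the end of the file, all of which are elided here.

/* Hedged sketch of a call site; special cases and error paths elided. */
static int squashfs_readpage_sketch(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
	u64 block = 0;
	int bsize;

	/* Look up where the compressed datablock starts and how long it is. */
	bsize = squashfs_read_blocklist(inode, index, &block);
	if (bsize < 0)
		return bsize;

	/* Decompress it straight into the page cache. */
	return squashfs_readpage_block(page, block, bsize);
}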