Example #1
File: rw26.c  Project: AshishNamdev/linux
/**
 * Implements the Linux VM address_space::invalidatepage() method. This
 * method is called when a page is truncated from a file, either as a result
 * of an explicit truncate or when the inode is removed from memory (as a
 * result of final iput(), umount, or memory-pressure-induced icache
 * shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers the case of a
 * non-page-aligned truncate). Lustre leaves the partially truncated page in
 * the cache, relying on struct inode::i_size to limit further accesses.
 */
static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
			      unsigned int length)
{
	struct inode     *inode;
	struct lu_env    *env;
	struct cl_page   *page;
	struct cl_object *obj;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageWriteback(vmpage));

	/*
	 * It is safe not to check anything in invalidatepage/releasepage
	 * below because they run with the page locked, and all our I/O
	 * happens with the page locked too.
	 */
	if (offset == 0 && length == PAGE_SIZE) {
		/* See the comment in ll_releasepage() */
		env = cl_env_percpu_get();
		LASSERT(!IS_ERR(env));
		inode = vmpage->mapping->host;
		obj = ll_i2info(inode)->lli_clob;
		if (obj) {
			page = cl_vmpage_page(vmpage, obj);
			if (page) {
				cl_page_delete(env, page);
				cl_page_put(env, page);
			}
		} else {
			LASSERT(vmpage->private == 0);
		}
		cl_env_percpu_put(env);
	}
}
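
For context, a hedged sketch of how a method like ll_invalidatepage() is typically wired into the VM: the filesystem registers it in the inode's address_space_operations table alongside ->releasepage(). The field selection and the ll_aops_sketch name below are illustrative assumptions, not taken from the examples on this page, and follow kernels that still expose the ->invalidatepage()/->releasepage() hooks.

/* Illustrative sketch: how the hooks above are reached by the VM */
static const struct address_space_operations ll_aops_sketch = {
	.invalidatepage	= ll_invalidatepage,	/* (partial) truncate of a cached page */
	.releasepage	= ll_releasepage,	/* VM asks whether the page can be freed */
	/* ... readpage, writepage, direct_IO, etc. ... */
};
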
Example #2
File: rw26.c  Project: rread/lustre
static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
{
    struct lu_env		*env;
    struct cl_object	*obj;
    struct cl_page		*page;
    struct address_space	*mapping;
    int result = 0;

    LASSERT(PageLocked(vmpage));
    if (PageWriteback(vmpage) || PageDirty(vmpage))
        return 0;

    mapping = vmpage->mapping;
    if (mapping == NULL)
        return 1;

    obj = ll_i2info(mapping->host)->lli_clob;
    if (obj == NULL)
        return 1;

    /* 1 for caller, 1 for cl_page and 1 for page cache */
    if (page_count(vmpage) > 3)
        return 0;

    page = cl_vmpage_page(vmpage, obj);
    if (page == NULL)
        return 1;

    env = cl_env_percpu_get();
    LASSERT(!IS_ERR(env));

    if (!cl_page_in_use(page)) {
        result = 1;
        cl_page_delete(env, page);
    }

    /* To use the percpu env array, the call path cannot be rescheduled;
     * otherwise the percpu array will be corrupted if ll_releasepage() is
     * called again on the same CPU.
     *
     * If this page holds the last reference of the cl_object, the following
     * call path may cause a reschedule:
     *   cl_page_put -> cl_page_free -> cl_object_put ->
     *     lu_object_put -> lu_object_free -> lov_delete_raid0.
     *
     * However, the kernel can't get rid of this inode until all pages have
     * been cleaned up. Since we hold the page lock here, it's pretty safe
     * that we won't get into the object delete path.
     */
    LASSERT(cl_object_refc(obj) > 1);
    cl_page_put(env, page);

    cl_env_percpu_put(env);
    return result;
}
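
RELEASEPAGE_ARG_TYPE in the prototype above papers over a kernel API difference: older kernels declared ->releasepage() with a plain int gfp_mask, newer ones with gfp_t. A hedged sketch of the kind of compatibility define it stands for (the HAVE_RELEASEPAGE_WITH_GFP configure symbol is an assumption for illustration; the real Lustre check may use a different name):

/* Illustrative compatibility shim; the configure symbol is an assumption */
#ifdef HAVE_RELEASEPAGE_WITH_GFP
# define RELEASEPAGE_ARG_TYPE gfp_t	/* int (*releasepage)(struct page *, gfp_t) */
#else
# define RELEASEPAGE_ARG_TYPE int	/* older prototype passed a plain int mask */
#endif
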
Example #3
File: rw26.c  Project: bacaldwell/lustre
/**
 * Implements the Linux VM address_space::invalidatepage() method. This
 * method is called when a page is truncated from a file, either as a result
 * of an explicit truncate or when the inode is removed from memory (as a
 * result of final iput(), umount, or memory-pressure-induced icache
 * shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers the case of a
 * non-page-aligned truncate). Lustre leaves the partially truncated page in
 * the cache, relying on struct inode::i_size to limit further accesses.
 */
static void ll_invalidatepage(struct page *vmpage,
#ifdef HAVE_INVALIDATE_RANGE
				unsigned int offset, unsigned int length
#else
				unsigned long offset
#endif
			     )
{
        struct inode     *inode;
        struct lu_env    *env;
        struct cl_page   *page;
        struct cl_object *obj;

        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

	/*
	 * It is safe not to check anything in invalidatepage/releasepage
	 * below because they run with the page locked, and all our I/O
	 * happens with the page locked too.
	 */
#ifdef HAVE_INVALIDATE_RANGE
	if (offset == 0 && length == PAGE_CACHE_SIZE) {
#else
	if (offset == 0) {
#endif
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        cl_page_delete(env, page);
                                        cl_page_put(env, page);
                                }
                        } else
                                LASSERT(vmpage->private == 0);
                        cl_env_put(env, &refcheck);
                }
        }
}
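
The HAVE_INVALIDATE_RANGE guard above selects between the two ->invalidatepage() prototypes the kernel has used: the older one takes only an offset, the newer one an (offset, length) range within the page. As a rough sketch of how the VM reaches this hook on a non-page-aligned truncate (simplified from the mm/truncate.c logic of kernels in this range; the helper name below is illustrative, not from the examples):

/* Simplified caller-side sketch; details vary between kernel versions */
static void truncate_partial_page_sketch(struct page *page, loff_t new_size)
{
	unsigned int partial = new_size & (PAGE_SIZE - 1);

	if (partial && page_has_private(page))
		/* drop bytes [partial, PAGE_SIZE) of this page; the filesystem's
		 * ->invalidatepage() (e.g. ll_invalidatepage above) is reached
		 * through do_invalidatepage() */
		do_invalidatepage(page, partial, PAGE_SIZE - partial);
}
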
Example #4
File: rw26.c  Project: hpc/lustre
/**
 * Implements the Linux VM address_space::invalidatepage() method. This
 * method is called when a page is truncated from a file, either as a result
 * of an explicit truncate or when the inode is removed from memory (as a
 * result of final iput(), umount, or memory-pressure-induced icache
 * shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers the case of a
 * non-page-aligned truncate). Lustre leaves the partially truncated page in
 * the cache, relying on struct inode::i_size to limit further accesses.
 */
static int cl_invalidatepage(struct page *vmpage, unsigned long offset)
{
        struct inode     *inode;
        struct lu_env    *env;
        struct cl_page   *page;
        struct cl_object *obj;

        int result;
        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe not to check anything in invalidatepage/releasepage
         * below because they run with the page locked, and all our I/O
         * happens with the page locked too.
         */
        result = 0;
        if (offset == 0) {
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        lu_ref_add(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_delete(env, page);
                                        result = 1;
                                        lu_ref_del(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_put(env, page);
                                }
                        } else
                                LASSERT(vmpage->private == 0);
                        cl_env_put(env, &refcheck);
                }