Example #1
/**
 * Implements the Linux VM address_space::invalidatepage() method. This
 * method is called when a page is truncated from a file, either as a
 * result of an explicit truncate or when the inode is removed from memory
 * (as a result of final iput(), umount, or memory-pressure-induced icache
 * shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers the case of a
 * truncate that is not page-aligned). Lustre leaves the partially
 * truncated page in the cache, relying on struct inode::i_size to limit
 * further accesses.
 */
static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
			      unsigned int length)
{
	struct inode     *inode;
	struct lu_env    *env;
	struct cl_page   *page;
	struct cl_object *obj;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageWriteback(vmpage));

	/*
	 * It is safe not to check anything in invalidatepage/releasepage
	 * below, because they run with the page locked, and all of our I/O
	 * happens with the page locked too.
	 */
	if (offset == 0 && length == PAGE_SIZE) {
		/* See the comment in ll_releasepage() */
		env = cl_env_percpu_get();
		LASSERT(!IS_ERR(env));
		inode = vmpage->mapping->host;
		obj = ll_i2info(inode)->lli_clob;
		if (obj) {
			page = cl_vmpage_page(vmpage, obj);
			if (page) {
				cl_page_delete(env, page);
				cl_page_put(env, page);
			}
		} else {
			LASSERT(vmpage->private == 0);
		}
		cl_env_percpu_put(env);
	}
}
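
For context, hooks like this reach the VM through the file's struct address_space_operations table. The snippet below is a minimal illustrative sketch of that wiring, not the actual Lustre definition; the table name example_aops is invented, and the real table in Lustre's llite layer registers many more methods.

#include <linux/fs.h>	/* struct address_space_operations */

/* Illustrative sketch only: hook up the two methods shown in these
 * examples. Not the actual Lustre table. */
static const struct address_space_operations example_aops = {
	.invalidatepage	= ll_invalidatepage,
	.releasepage	= ll_releasepage,
};
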
Example #2
static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
{
	struct lu_env		*env;
	struct cl_object	*obj;
	struct cl_page		*page;
	struct address_space	*mapping;
	int result = 0;

	LASSERT(PageLocked(vmpage));
	if (PageWriteback(vmpage) || PageDirty(vmpage))
		return 0;

	mapping = vmpage->mapping;
	if (mapping == NULL)
		return 1;

	obj = ll_i2info(mapping->host)->lli_clob;
	if (obj == NULL)
		return 1;

	/* 1 for caller, 1 for cl_page and 1 for page cache */
	if (page_count(vmpage) > 3)
		return 0;

	page = cl_vmpage_page(vmpage, obj);
	if (page == NULL)
		return 1;

	env = cl_env_percpu_get();
	LASSERT(!IS_ERR(env));

	if (!cl_page_in_use(page)) {
		result = 1;
		cl_page_delete(env, page);
	}

	/* To use the percpu env array, the call path must not be rescheduled;
	 * otherwise the percpu array will be corrupted if ll_releasepage() is
	 * called again on the same CPU.
	 *
	 * If this page holds the last reference on the cl_object, the
	 * following call path may cause a reschedule:
	 *   cl_page_put -> cl_page_free -> cl_object_put ->
	 *     lu_object_put -> lu_object_free -> lov_delete_raid0.
	 *
	 * However, the kernel cannot get rid of this inode until all of its
	 * pages have been cleaned up. Since we hold the page lock here, it is
	 * safe to assume we will not get into the object-delete path.
	 */
	LASSERT(cl_object_refc(obj) > 1);
	cl_page_put(env, page);

	cl_env_percpu_put(env);
	return result;
}
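
On the kernel side, ->releasepage() is invoked under the page lock via try_to_release_page(). The sketch below is an approximation of mainline mm/filemap.c from pre-folio kernels (treat the exact body as illustrative, not authoritative); it shows why ll_releasepage() may assert PageLocked() and why a page under writeback never reaches the hook.

/* Approximation of mainline try_to_release_page() (mm/filemap.c,
 * pre-folio kernels): the page arrives locked, and a page under
 * writeback is never handed to the filesystem hook. */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}
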
Example #3
/**
 * Lock upcall function that is executed either when a reply to an ENQUEUE
 * RPC is received from a server, or after osc_enqueue_base() matched a
 * local DLM lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
			   int errcode)
{
	struct osc_lock         *oscl  = cookie;
	struct cl_lock_slice    *slice = &oscl->ols_cl;
	struct lu_env           *env;
	int			rc;

	ENTRY;

	env = cl_env_percpu_get();
	/* should never happen, similar to osc_ldlm_blocking_ast(). */
	LASSERT(!IS_ERR(env));

	rc = ldlm_error2errno(errcode);
	if (oscl->ols_state == OLS_ENQUEUED) {
		oscl->ols_state = OLS_UPCALL_RECEIVED;
	} else if (oscl->ols_state == OLS_CANCELLED) {
		rc = -EIO;
	} else {
		CERROR("Impossible state: %d\n", oscl->ols_state);
		LBUG();
	}

	if (rc == 0)
		osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

	/* Error handling: some errors are tolerable. */
	if (oscl->ols_locklessable && rc == -EUSERS) {
		/* This is a tolerable error; turn this lock into a
		 * lockless lock.
		 */
		osc_object_set_contended(cl2osc(slice->cls_obj));
		LASSERT(slice->cls_ops != oscl->ols_lockless_ops);

		/* Change this lock to a lockless (non-ldlm) lock. */
		osc_lock_to_lockless(env, oscl, 1);
		oscl->ols_state = OLS_GRANTED;
		rc = 0;
	} else if (oscl->ols_glimpse && rc == -ENAVAIL) {
		LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
		osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
				    NULL, &oscl->ols_lvb);
		/* Hide the error. */
		rc = 0;
	} else if (rc < 0 && (oscl->ols_flags & LDLM_FL_NDELAY)) {
		rc = -EWOULDBLOCK;
	}

	if (oscl->ols_owner != NULL)
		cl_sync_io_note(env, oscl->ols_owner, rc);
	cl_env_percpu_put(env);

	RETURN(rc);
}
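
The cookie argument above is the osc_lock itself: the enqueue path registers osc_lock_upcall() together with the lock, and the RPC layer calls back with the same pointer once a reply arrives or a local lock is matched. The sketch below illustrates only that hand-off pattern; enqueue_with_upcall() and the simplified signatures are invented for illustration, while the real entry point is osc_enqueue_base() with many more parameters.

typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
				    int errcode);

/* Invented stand-in for the real osc_enqueue_base(): here it simply
 * completes at once and fires the upcall with a granted handle. */
static int enqueue_with_upcall(osc_enqueue_upcall_f upcall, void *cookie)
{
	struct lustre_handle lockh = { 0 };

	return upcall(cookie, &lockh, ELDLM_OK);
}

static int osc_lock_enqueue_sketch(struct osc_lock *oscl)
{
	/* The lock itself is the opaque cookie; osc_lock_upcall() casts
	 * it back to struct osc_lock * when the reply arrives. */
	return enqueue_with_upcall(osc_lock_upcall, oscl);
}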