Example #1
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
                            struct cl_page *page, struct ccc_page *cp,
                            enum cl_req_type crt)
{
        struct cl_2queue  *queue;
        int result;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        queue = &io->ci_queue;
        cl_2queue_init_page(queue, page);

        result = cl_io_submit_sync(env, io, crt, queue, 0);
        LASSERT(cl_page_is_owned(page, io));

        if (crt == CRT_READ)
                /*
                 * in the CRT_WRITE case the page is left locked even in
                 * case of error.
                 */
                cl_page_list_disown(env, io, &queue->c2_qin);
        cl_2queue_fini(env, queue);

        return result;
}
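
For context, here is a minimal sketch of how a caller might use this helper, assuming it already owns the cl_page and has built a CIT_READ io against the same object (the wrapper name read_one_page_sync is hypothetical):

static int read_one_page_sync(const struct lu_env *env, struct cl_io *io,
                              struct cl_page *page, struct ccc_page *cp)
{
        /* vvp_page_sync_io() asserts that io is CIT_READ or CIT_WRITE and
         * that the caller owns 'page'; for CRT_READ it disowns the queued
         * page itself before returning, as shown above. */
        return vvp_page_sync_io(env, io, page, cp, CRT_READ);
}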
Example #2
File: rw26.c Project: rread/lustre
static int ll_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *vmpage, void *fsdata)
{
    struct ll_cl_context *lcc = fsdata;
    const struct lu_env *env;
    struct cl_io *io;
    struct vvp_io *vio;
    struct cl_page *page;
    unsigned from = pos & (PAGE_SIZE - 1);
    bool unplug = false;
    int result = 0;
    ENTRY;

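    /* drop the page reference presumably taken in ll_write_begin() */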
    put_page(vmpage);

    LASSERT(lcc != NULL);
    env  = lcc->lcc_env;
    page = lcc->lcc_page;
    io   = lcc->lcc_io;
    vio  = vvp_env_io(env);

    LASSERT(cl_page_is_owned(page, io));
    if (copied > 0) {
        struct cl_page_list *plist = &vio->u.write.vui_queue;

        lcc->lcc_page = NULL; /* page will be queued */

        /* Add it to the write queue */
        cl_page_list_add(plist, page);
        if (plist->pl_nr == 1) /* first page */
            vio->u.write.vui_from = from;
        else
            LASSERT(from == 0);
        vio->u.write.vui_to = from + copied;

        /* Commit soon to avoid the deadlock in balance_dirty_pages(),
         * where this dirty page may be written back in the same thread. */
        if (PageDirty(vmpage))
            unplug = true;

        /* We may have a full RPC's worth of pages; commit it soon */
        if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
            unplug = true;

        CL_PAGE_DEBUG(D_VFSTRACE, env, page,
                      "queued page: %d.\n", plist->pl_nr);
    } else {
        cl_page_disown(env, io, page);

        lcc->lcc_page = NULL;
        lu_ref_del(&page->cp_reference, "cl_io", io);
        cl_page_put(env, page);

        /* the page list is no longer contiguous; commit it now */
        unplug = true;
    }
    if (unplug ||
            file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
        result = vvp_io_write_commit(env, io);

    if (result < 0)
        io->ci_result = result;
    RETURN(result >= 0 ? copied : result);
}
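
ll_write_end() is the .write_end half of the buffered-write hooks: the generic kernel write path calls .write_begin to get a locked, referenced page, copies user data into it, and then calls .write_end, which here queues the page for a later bulk commit via vvp_io_write_commit() rather than writing it out immediately. A sketch of the wiring, assuming the conventional Lustre names (ll_write_begin lives in the same rw26.c; the other methods are elided):

const struct address_space_operations ll_aops = {
        .write_begin = ll_write_begin, /* pins the vmpage, passes an ll_cl_context out via fsdata */
        .write_end   = ll_write_end,   /* queues the page; commits on O_SYNC or a full RPC */
        /* ... readpage, writepage, etc. elided ... */
};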
Example #3
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct vvp_io       *vio     = cl2vvp_io(env, ios);
        struct cl_io        *io      = ios->cis_io;
        struct cl_object    *obj     = io->ci_obj;
        struct inode        *inode   = ccc_object_inode(obj);
        struct cl_fault_io  *fio     = &io->u.ci_fault;
        struct vvp_fault_io *cfio    = &vio->u.fault;
        loff_t               offset;
        int                  result  = 0;
        struct page         *vmpage  = NULL;
        struct cl_page      *page;
        loff_t               size;
        pgoff_t              last; /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

        /* must return locked page */
        if (fio->ft_mkwrite) {
                LASSERT(cfio->ft_vmpage != NULL);
                lock_page(cfio->ft_vmpage);
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
                        return result;
        }

        vmpage = cfio->ft_vmpage;
        LASSERT(PageLocked(vmpage));

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
                ll_invalidate_page(vmpage);

        size = i_size_read(inode);
        /* Though we already hold a cl_lock on this page, it can still be
         * truncated locally. */
        if (unlikely((vmpage->mapping != inode->i_mapping) ||
                     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop(); ll_fault() will catch
                 * this and retry. */
                GOTO(out, result = +1);
        }

        if (fio->ft_mkwrite) {
                pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above; we want to make sure that we complete the mkwrite
                 * action while holding this lock. We need to make sure that
                 * we are not past the end of the file.
                 */
                last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                               "llite: mkwrite and truncate race happened: "
                               "%p: 0x%lx 0x%lx\n",
                               vmpage->mapping, fio->ft_index, last_index);
                        /*
                         * We need to return if we are past the end of the
                         * file. This will propagate up the call stack to
                         * ll_page_mkwrite, where we will return
                         * VM_FAULT_NOPAGE. Any non-negative value returned
                         * here will be silently converted to 0. If
                         * vmpage->mapping is NULL, the error code would be
                         * converted back to ENODATA in ll_page_mkwrite0.
                         * Thus we return -ENODATA to handle both cases.
                         */
                        GOTO(out, result = -ENODATA);
                }
        }

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if the page is going to be written, we should add it to the
         * cache earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set the Dirty bit here, so that if IO is
                         * started before the page is really made dirty, we
                         * still have a chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
                        LASSERT(cl_page_is_owned(page, io));

                        vmpage = NULL;
                        if (result < 0) {
                                cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);

                                cl_page_put(env, page);

                                /* we're in big trouble, what can we do now? */
                                if (result == -EDQUOT)
                                        result = -ENOSPC;
                                GOTO(out, result);
                        } else {
                                cl_page_disown(env, io, page);
                        }
                }
        }

        last = cl_index(obj, size - 1);
        /*
         * ft_index is only used in the mkwrite case. Assert that an index
         * past EOF was already caught by the check above.
         */
        LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /*
                 * The last page is mapped partially.
                 */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;
        EXIT;

out:
        /* return an unlocked vmpage to avoid deadlocks */
        if (vmpage != NULL)
                unlock_page(vmpage);
        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
        return result;
}
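
vvp_io_fault_start() runs as the cio_start method of the CIT_FAULT slot in the vvp layer's cl_io_operations vector. A sketch of that wiring, assuming the conventional vvp_io_ops layout (the sibling method names are illustrative):

static const struct cl_io_operations vvp_io_ops = {
        .op = {
                [CIT_FAULT] = {
                        .cio_fini  = vvp_io_fault_fini, /* illustrative */
                        .cio_lock  = vvp_io_fault_lock, /* illustrative */
                        .cio_start = vvp_io_fault_start,
                        .cio_end   = vvp_io_fault_end,  /* illustrative */
                },
                /* ... other CIT_* slots elided ... */
        },
};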