Example #1
0
/**
 * Implements the cl_io_operations::cio_prepare_write() method for the osc
 * layer.
 *
 * \retval -EIO transfer initiated against this osc will most likely fail.
 * \retval 0    transfer initiated against this osc will most likely succeed.
 *
 * The reason for this check is to return an error to the caller immediately
 * when the import is already deactivated. The import can also be deactivated
 * later, while pages dirtied by this IO are still in the cache; that case is
 * irrelevant here, because the error would still reach the application if it
 * called fsync(). Many applications skip fsync() for performance reasons,
 * however, so we want to return -EIO at write time to notify the application.
 */
static int osc_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
	struct obd_import *imp = class_exp2cliimp(dev->od_exp);
	struct osc_io     *oio = cl2osc_io(env, ios);
	int result = 0;

	/*
	 * This implements OBD_BRW_CHECK logic from old client.
	 */

	if (imp == NULL || imp->imp_invalid)
		result = -EIO;
	if (result == 0 && oio->oi_lockless)
		/* This page contains `invalid' data, but that is harmless:
		 * nobody can access the invalid data.  In osc_io_commit_write()
		 * we are going to write exactly the [from, to) bytes of this
		 * page to the OST. -jay */
		cl_page_export(env, slice->cpl_page, 1);

	return result;
}
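
The comment above argues for failing a write early when the import is already invalid, rather than letting the error surface only at fsync() time. As a rough illustration of that fail-fast check outside the Lustre code base, the following minimal user-space sketch uses hypothetical names (struct backend_state, prepare_write) that are not part of Lustre:

#include <errno.h>
#include <stddef.h>

/* Hypothetical stand-in for obd_import: connection state to the server. */
struct backend_state {
	int invalid;	/* set once the connection has been deactivated */
};

/*
 * Fail-fast write preparation: report -EIO at write time when the
 * backend is already known to be unusable, so that applications which
 * never call fsync() still see the error.
 */
static int prepare_write(const struct backend_state *be)
{
	if (be == NULL || be->invalid)
		return -EIO;
	return 0;
}
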
Example #2
0
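/**
 * Implements the cl_io_operations read-page method for the vvp layer:
 * updates the per-file readahead state, checks that the page is covered
 * by a lock, exports a deferred-uptodate (readahead) page to the VM, adds
 * the page to the IO queue, and triggers readahead when it is enabled.
 */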
static int vvp_io_read_page(const struct lu_env *env,
                            const struct cl_io_slice *ios,
                            const struct cl_page_slice *slice)
{
        struct cl_io              *io     = ios->cis_io;
        struct cl_object          *obj    = slice->cpl_obj;
        struct ccc_page           *cp     = cl2ccc_page(slice);
        struct cl_page            *page   = slice->cpl_page;
        struct inode              *inode  = ccc_object_inode(obj);
        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
        struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
        struct ll_readahead_state *ras    = &fd->fd_ras;
        struct page               *vmpage = cp->cpg_page;
        struct cl_2queue          *queue  = &io->ci_queue;
        int rc;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        LASSERT(slice->cpl_obj == obj);

        ENTRY;

        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ras_update(sbi, inode, ras, page->cp_index,
                           cp->cpg_defer_uptodate);

        /* Sanity check whether the page is protected by a lock. */
        rc = cl_page_is_under_lock(env, io, page);
        if (rc != -EBUSY) {
                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
                               rc == -ENODATA ? "without a lock" :
                               "match failed", rc);
                if (rc != -ENODATA)
                        RETURN(rc);
        }

        if (cp->cpg_defer_uptodate) {
                cp->cpg_ra_used = 1;
                cl_page_export(env, page, 1);
        }
        /*
         * Add the page to the queue even when it is marked uptodate above:
         * this will unlock it automatically as part of cl_page_list_disown().
         */
        cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ll_readahead(env, io, ras,
                             vmpage->mapping, &queue->c2_qin, fd->fd_flags);

        RETURN(0);
}
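
The comment before cl_2queue_add() explains why the page is queued even when it is already uptodate: cl_page_list_disown() later unlocks every queued page in one place. A minimal sketch of that "collect now, release in one pass" ownership pattern, using hypothetical types and helpers (struct pending_pages, pending_add, pending_disown) rather than the real cl_2queue API, might look like:

#include <stddef.h>

#define MAX_PENDING 16

/* Hypothetical stand-in for cl_2queue: pages collected for one IO. */
struct pending_pages {
	void *pages[MAX_PENDING];
	int   count;
};

static void pending_add(struct pending_pages *q, void *page)
{
	if (q->count < MAX_PENDING)
		q->pages[q->count++] = page;
}

/*
 * Release every collected page in one pass; the caller supplies the
 * unlock callback.  This mirrors why the read path above queues even
 * uptodate pages: a single disown step unlocks them all.
 */
static void pending_disown(struct pending_pages *q, void (*unlock)(void *page))
{
	int i;

	for (i = 0; i < q->count; i++)
		unlock(q->pages[i]);
	q->count = 0;
}
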
Example #3
0
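/**
 * Page initialization for a LOV object with an empty layout: attach the
 * empty-page slice, zero-fill the backing VM page (there is no stripe to
 * read data from), and export the page to the VM as up-to-date.
 */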
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
	struct lov_page *lpg = cl_object_page_slice(obj, page);
	void *addr;

	cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
	addr = kmap(vmpage);
	memset(addr, 0, cl_page_size(obj));
	kunmap(vmpage);
	cl_page_export(env, page, 1);
	return 0;
}
Example #4
0
/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj, struct cl_page *pg,
                                  struct ccc_page *cp,
                                  unsigned from, unsigned to)
{
        struct cl_attr *attr   = ccc_env_thread_attr(env);
        loff_t          offset = cl_offset(obj, pg->cp_index);
        int             result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, there is no need to read
                 * the old data.  The extent locking will have updated the KMS,
                 * and for our purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);

                        memset(kaddr, 0, cl_page_size(obj));
                        ll_kunmap_atomic(kaddr, KM_USER0);
                } else if (cp->cpg_defer_uptodate)
                        cp->cpg_ra_used = 1;
                else
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
                /*
                 * In older implementations, obdo_refresh_inode was called here
                 * to update the inode, because the write might modify the
                 * object info at the OST.  However, this has proven useless,
                 * since the LVB functions are called when a user space program
                 * tries to retrieve inode attributes.  See bug 15909 for
                 * details. -jay
                 */
                if (result == 0)
                        cl_page_export(env, pg, 1);
        }
        return result;
}
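
The KMS check in the branch above captures a read-modify-write optimization: when the write begins at or beyond the known minimum size of the object, the page cannot hold any existing data, so zero-filling it is enough and the read from the server can be skipped. A minimal sketch of that decision, using hypothetical names (prepare_partial_page, PAGE_SZ) unrelated to the Lustre code, is:

#include <string.h>

#define PAGE_SZ 4096

/*
 * Prepare a page buffer for a partial overwrite.  If the page starts at
 * or beyond the known minimum size (kms) of the object, it cannot
 * contain any existing data, so zero-fill it and report that no read is
 * needed; otherwise the caller must read the old contents first.
 */
static int prepare_partial_page(char *page, long long kms, long long page_offset)
{
	if (kms <= page_offset) {
		memset(page, 0, PAGE_SZ);	/* nothing valid to preserve */
		return 0;			/* no read needed */
	}
	return 1;				/* read old data before writing */
}
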