Example #1
/**
 * Prepare partially written-to page for a write.
 */
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
    struct cl_attr *attr   = vvp_env_thread_attr(env);
    struct cl_object *obj  = io->ci_obj;
    struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
    loff_t          offset = cl_offset(obj, vvp_index(vpg));
    int             result;

    cl_object_attr_lock(obj);
    result = cl_object_attr_get(env, obj, attr);
    cl_object_attr_unlock(obj);
    if (result == 0) {
        /*
         * If we are writing to a new page, no need to read old data.
         * The extent locking will have updated the KMS, and for our
         * purposes here we can treat it like i_size.
         */
        if (attr->cat_kms <= offset) {
            char *kaddr = ll_kmap_atomic(vpg->vpg_page, KM_USER0);

            memset(kaddr, 0, cl_page_size(obj));
            ll_kunmap_atomic(kaddr, KM_USER0);
        } else if (vpg->vpg_defer_uptodate)
            vpg->vpg_ra_used = 1;
        else
            result = ll_page_sync_io(env, io, pg, CRT_READ);
    }
    return result;
}
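The example above shows the attribute-read pattern used throughout this listing: take cl_object_attr_lock(), call cl_object_attr_get(), drop the lock, then act on the result. Below is a minimal sketch of just that pattern, assuming the same client API as the example; the helper name example_read_kms and its kms out-parameter are hypothetical.

static int example_read_kms(const struct lu_env *env, struct cl_object *obj,
                            __u64 *kms)
{
    struct cl_attr *attr = vvp_env_thread_attr(env);
    int             result;

    /* attribute reads and updates must hold the object's attribute lock */
    cl_object_attr_lock(obj);
    result = cl_object_attr_get(env, obj, attr);
    cl_object_attr_unlock(obj);
    if (result == 0)
        *kms = attr->cat_kms; /* the "known minimum size" seen by this client */
    return result;
}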
Example #2
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, unsigned to)
{
	struct lov_oinfo  *loi  = cl2osc(obj)->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *	 ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	valid = 0;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
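For orientation, kms here is a byte offset: cl_offset(obj, idx) converts the page index idx to the byte offset of that page's start, and to is the write end offset within the page. A self-contained illustration of the arithmetic, with a 4096-byte page size assumed purely for the example:

/* what cl_offset(obj, idx) + to evaluates to for a 4096-byte page size;
 * e.g. idx = 3, to = 100 gives 3 * 4096 + 100 = 12388, so bytes
 * [0, 12388) are known to hold valid data */
static unsigned long long example_kms(unsigned long long idx, unsigned int to)
{
	const unsigned long long page_size = 4096; /* assumption */

	return idx * page_size + to;
}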
Example #3
static void vvp_object_size_lock(struct cl_object *obj)
{
	struct inode *inode = vvp_object_inode(obj);

	ll_inode_size_lock(inode);
	cl_object_attr_lock(obj);
}
Example #4
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
				int rc)
{
	struct ost_lvb    *lvb;
	struct cl_object  *obj;
	struct lov_oinfo  *oinfo;
	struct cl_attr    *attr;
	unsigned	   valid;

	if (!(olck->ols_flags & LDLM_FL_LVB_READY))
		return;

	lvb   = &olck->ols_lvb;
	obj   = olck->ols_cl.cls_obj;
	oinfo = cl2osc(obj)->oo_oinfo;
	attr  = &osc_env_info(env)->oti_attr;
	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (rc == 0) {
		struct ldlm_lock  *dlmlock;
		__u64 size;

		dlmlock = olck->ols_lock;
		LASSERT(dlmlock != NULL);

		/* re-grab LVB from a dlm lock under DLM spin-locks. */
		*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
		size = lvb->lvb_size;
		/* Extend KMS up to the end of this lock and no further.
		 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
				   ", kms="LPU64, lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   LPU64"; leaving kms="LPU64", end="LPU64,
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	} else if (rc == -ENAVAIL && olck->ols_glimpse) {
		CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
		       " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
	} else
		valid = 0;

	if (valid != 0)
		cl_object_attr_set(env, obj, attr, valid);

	cl_object_attr_unlock(obj);
}
Example #5
/**
 * Helper for mdc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	bool discard;
	enum cl_lock_mode mode = CLM_READ;

	ENTRY;

	LASSERT(flag == LDLM_CB_CANCELING);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		RETURN(0);
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data != NULL) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;
		cl_object_get(obj);
	}
	ldlm_set_kms_ignore(dlmlock);
	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed. */
	if (obj != NULL) {
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		/* Destroy pages covered by the extent of the DLM lock */
		result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
					CL_PAGE_EOF, mode, discard);
		/* Losing a lock, set KMS to 0.
		 * NB: assumed that DOM lock covers whole data on MDT.
		 */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		attr->cat_kms = 0;
		cl_object_attr_update(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);
		cl_object_put(env, obj);
	}
	RETURN(result);
}
Example #6
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env,
				struct osc_object *osc,
				struct ldlm_lock *dlmlock,
				struct ost_lvb *lvb)
{
	struct cl_object  *obj = osc2cl(osc);
	struct lov_oinfo  *oinfo = osc->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	unsigned           valid;

	ENTRY;

	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	if (lvb == NULL) {
		LASSERT(dlmlock != NULL);
		lvb = dlmlock->l_lvb_data;
	}
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (dlmlock != NULL) {
		__u64 size;

		check_res_locked(dlmlock->l_resource);

		LASSERT(lvb == dlmlock->l_lvb_data);
		size = lvb->lvb_size;

		/* Extend KMS up to the end of this lock and no further.
		 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu"
				   ", kms=%llu", lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   "%llu; leaving kms=%llu, end=%llu",
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	}

	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	EXIT;
}
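The clamp in both versions of osc_lock_lvb_update() deserves spelling out: a lock on the extent [x, y] covers bytes x through y inclusive, so the known minimum size can reach y + 1 at most. A self-contained sketch of that clamp (the helper name is hypothetical):

/* e.g. lvb_size = 1048576 under a lock on [0, 65535] clamps to 65536 */
static unsigned long long example_clamp_kms(unsigned long long lvb_size,
					    unsigned long long extent_end)
{
	return lvb_size > extent_end ? extent_end + 1 : lvb_size;
}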
Example #7
static int osc_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *slice)
{
	struct cl_object *obj   = slice->cis_obj;
	struct cl_attr   *attr  = &osc_env_info(env)->oti_attr;
	int rc = 0;

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
	cl_object_attr_lock(obj);
	attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
	rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
	cl_object_attr_unlock(obj);

	return rc;
}
Example #8
static int osc_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *slice)
{
	struct cl_object *obj   = slice->cis_obj;
	struct cl_attr   *attr  = &osc_env_info(env)->oti_attr;
	int rc = 0;

	if (!slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = LTIME_S(CURRENT_TIME);
		rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}
	return rc;
}
Example #9
static int osc_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
	struct osc_io	 *oio  = cl2osc_io(env, slice);
	struct cl_object *obj  = slice->cis_obj;
	struct cl_attr	 *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;
	ENTRY;

	if (oio->oi_lockless == 0 && !slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = LTIME_S(CFS_CURRENT_TIME);
		rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}

	RETURN(rc);
}
Example #10
/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
	struct ldlm_lock *dlmlock;

	spin_lock(&osc_ast_guard);
	dlmlock = olck->ols_lock;
	if (dlmlock == NULL) {
		spin_unlock(&osc_ast_guard);
		return;
	}

	olck->ols_lock = NULL;
	/* wb(); --- for all who check (ols->ols_lock != NULL) before
	 * calling osc_lock_detach() */
	dlmlock->l_ast_data = NULL;
	olck->ols_handle.cookie = 0ULL;
	spin_unlock(&osc_ast_guard);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct cl_object *obj = olck->ols_cl.cls_obj;
		struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid possible races. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop all granted locks.
		 * Not a problem for the client */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
	}
	unlock_res_and_lock(dlmlock);

	/* release a reference taken in osc_lock_upcall0(). */
	LASSERT(olck->ols_has_ref);
	lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
	LDLM_LOCK_RELEASE(dlmlock);
	olck->ols_has_ref = 0;
}
Example #11
/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj, struct cl_page *pg,
                                  struct ccc_page *cp,
                                  unsigned from, unsigned to)
{
        struct cl_attr *attr   = ccc_env_thread_attr(env);
        loff_t          offset = cl_offset(obj, pg->cp_index);
        int             result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, no need to read old data.
                 * The extent locking will have updated the KMS, and for our
                 * purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);

                        memset(kaddr, 0, cl_page_size(obj));
                        ll_kunmap_atomic(kaddr, KM_USER0);
                } else if (cp->cpg_defer_uptodate)
                        cp->cpg_ra_used = 1;
                else
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
                /*
                 * In older implementations, obdo_refresh_inode was called
                 * here to update the inode because the write might modify
                 * the object info at the OST. However, this has proven
                 * unnecessary, since the LVB functions are called whenever a
                 * user-space program retrieves the inode attributes. See
                 * bug 15909 for details. -jay
                 */
                if (result == 0)
                        cl_page_export(env, pg, 1);
        }
        return result;
}
Example #12
static int osc_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct osc_io    *oio   = cl2osc_io(env, slice);
        struct cl_object *obj   = slice->cis_obj;
        struct cl_attr   *attr  = &osc_env_info(env)->oti_attr;
        int              result = 0;
        ENTRY;

        if (oio->oi_lockless == 0) {
                OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        attr->cat_mtime = attr->cat_ctime =
                                LTIME_S(CFS_CURRENT_TIME);
                        result = cl_object_attr_set(env, obj, attr,
                                                    CAT_MTIME | CAT_CTIME);
                }
                cl_object_attr_unlock(obj);
        }
        RETURN(result);
}
Example #13
static int vvp_io_setattr_time(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io       *io    = ios->cis_io;
        struct cl_object   *obj   = io->ci_obj;
        struct cl_attr     *attr  = ccc_env_thread_attr(env);
        int result;
        unsigned valid = CAT_CTIME;

        cl_object_attr_lock(obj);
        attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
        if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
                attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
                valid |= CAT_ATIME;
        }
        if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
                attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
                valid |= CAT_MTIME;
        }
        result = cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

        return result;
}
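The same bitmask convention runs through all of the setattr examples: the caller fills only those cl_attr fields whose CAT_* flag it ORs into valid, and cl_object_attr_set() applies exactly those fields. A minimal sketch that updates a single field, assuming the same API as above (the helper name is hypothetical):

static int example_set_ctime(const struct lu_env *env, struct cl_object *obj,
                             struct cl_attr *attr, __u64 ctime)
{
        int result;

        cl_object_attr_lock(obj);
        attr->cat_ctime = ctime;        /* fill only the field being set */
        result = cl_object_attr_set(env, obj, attr, CAT_CTIME);
        cl_object_attr_unlock(obj);
        return result;
}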
Example #14
/**
 * This is called when a page is accessed within file in a way that creates
 * new page, if one were missing (i.e., if there were a hole at that place in
 * the file, or accessed page is beyond the current file size).
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, size_t to)
{
	struct lov_oinfo  *loi  = cl2osc(obj)->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *	 ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
	valid = CAT_MTIME | CAT_CTIME;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
Example #15
static int osc_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	struct cl_io            *io     = slice->cis_io;
	struct osc_io           *oio    = cl2osc_io(env, slice);
	struct cl_object        *obj    = slice->cis_obj;
	struct lov_oinfo        *loi    = cl2osc(obj)->oo_oinfo;
	struct cl_attr          *attr   = &osc_env_info(env)->oti_attr;
	struct obdo             *oa     = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	__u64                    size   = io->u.ci_setattr.sa_attr.lvb_size;
	unsigned int             ia_valid = io->u.ci_setattr.sa_valid;
	int                      result = 0;
	struct obd_info          oinfo  = { { { 0 } } };

	/* truncate cache dirty pages first */
	if (cl_io_is_trunc(io))
		result = osc_cache_truncate_start(env, oio, cl2osc(obj), size);

	if (result == 0 && oio->oi_lockless == 0) {
		cl_object_attr_lock(obj);
		result = cl_object_attr_get(env, obj, attr);
		if (result == 0) {
			struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
			unsigned int cl_valid = 0;

			if (ia_valid & ATTR_SIZE) {
				attr->cat_size = attr->cat_kms = size;
				cl_valid = (CAT_SIZE | CAT_KMS);
			}
			if (ia_valid & ATTR_MTIME_SET) {
				attr->cat_mtime = lvb->lvb_mtime;
				cl_valid |= CAT_MTIME;
			}
			if (ia_valid & ATTR_ATIME_SET) {
				attr->cat_atime = lvb->lvb_atime;
				cl_valid |= CAT_ATIME;
			}
			if (ia_valid & ATTR_CTIME_SET) {
				attr->cat_ctime = lvb->lvb_ctime;
				cl_valid |= CAT_CTIME;
			}
			result = cl_object_attr_set(env, obj, attr, cl_valid);
		}
		cl_object_attr_unlock(obj);
	}
	memset(oa, 0, sizeof(*oa));
	if (result == 0) {
		oa->o_oi = loi->loi_oi;
		oa->o_mtime = attr->cat_mtime;
		oa->o_atime = attr->cat_atime;
		oa->o_ctime = attr->cat_ctime;
		oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
			OBD_MD_FLCTIME | OBD_MD_FLMTIME;
		if (ia_valid & ATTR_SIZE) {
			oa->o_size = size;
			oa->o_blocks = OBD_OBJECT_EOF;
			oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

			if (oio->oi_lockless) {
				oa->o_flags = OBD_FL_SRVLOCK;
				oa->o_valid |= OBD_MD_FLFLAGS;
			}
		} else {
			LASSERT(oio->oi_lockless == 0);
		}

		oinfo.oi_oa = oa;
		oinfo.oi_capa = io->u.ci_setattr.sa_capa;
		init_completion(&cbargs->opc_sync);

		if (ia_valid & ATTR_SIZE)
			result = osc_punch_base(osc_export(cl2osc(obj)),
						&oinfo, osc_async_upcall,
						cbargs, PTLRPCD_SET);
		else
			result = osc_setattr_async_base(osc_export(cl2osc(obj)),
							&oinfo, NULL,
							osc_async_upcall,
							cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}
	return result;
}
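The init_completion(&cbargs->opc_sync) above pairs with the osc_async_upcall callback passed to the RPC helpers: the upcall is expected to record the RPC result and complete opc_sync so the I/O-end handler can wait on it. A hedged sketch of what such an upcall might look like; the opc_rc field and the exact callback signature are assumptions, not taken from this listing:

static int example_async_upcall(void *arg, int rc)
{
	struct osc_async_cbargs *cbargs = arg;

	cbargs->opc_rc = rc;		/* assumed result field */
	complete(&cbargs->opc_sync);	/* wake the waiter at I/O end */
	return 0;
}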
Example #16
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	int discard;
	enum cl_lock_mode mode = CLM_READ;

	LASSERT(flag == LDLM_CB_CANCELING);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		return 0;
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;

		cl_object_get(obj);
	}

	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed.
	 */
	if (obj) {
		struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		/* Destroy pages covered by the extent of the DLM lock */
		result = osc_lock_flush(cl2osc(obj),
					cl_index(obj, extent->start),
					cl_index(obj, extent->end),
					mode, discard);

		/* losing a lock, update kms */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid race. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop all granted locks.
		 * Not a problem for the client
		 */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);

		cl_object_put(env, obj);
	}
	return result;
}
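Note the lock nesting that mdc_dlm_blocking_ast0() and osc_dlm_blocking_ast0() share when KMS is updated from a blocking AST: the DLM resource lock is taken before the cl_object attribute lock, and the two are dropped in reverse order. A minimal sketch of just that nesting, assuming the same API (the helper name is hypothetical):

static void example_update_kms_locked(const struct lu_env *env,
				      struct ldlm_lock *dlmlock,
				      struct cl_object *obj, __u64 kms)
{
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;

	lock_res_and_lock(dlmlock);	/* DLM resource lock first */
	cl_object_attr_lock(obj);	/* then the attribute lock */
	attr->cat_kms = kms;
	cl_object_attr_set(env, obj, attr, CAT_KMS);
	cl_object_attr_unlock(obj);	/* release in reverse order */
	unlock_res_and_lock(dlmlock);
}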
}