Example #1
static int osc_io_ladvise_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	int			 result = 0;
	struct cl_io		*io = slice->cis_io;
	struct osc_io		*oio = cl2osc_io(env, slice);
	struct cl_object	*obj = slice->cis_obj;
	struct lov_oinfo	*loi = cl2osc(obj)->oo_oinfo;
	struct cl_ladvise_io	*lio = &io->u.ci_ladvise;
	struct obdo		*oa = &oio->oi_oa;
	struct osc_async_cbargs	*cbargs = &oio->oi_cbarg;
	struct lu_ladvise	*ladvise;
	struct ladvise_hdr	*ladvise_hdr;
	int			 buf_size;
	int			 num_advise = 1;
	ENTRY;

	/* TODO: add multiple ladvise support in CLIO */
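	/* Size the buffer as a ladvise header followed by num_advise
	 * flexible-array advice entries. */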
	buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]);
	if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size)
		lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size);

	ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf;
	if (ladvise_hdr == NULL)
		RETURN(-ENOMEM);

	memset(ladvise_hdr, 0, buf_size);
	ladvise_hdr->lah_magic = LADVISE_MAGIC;
	ladvise_hdr->lah_count = num_advise;
	ladvise_hdr->lah_flags = lio->li_flags;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID;
	obdo_set_parent_fid(oa, lio->li_fid);

	ladvise = ladvise_hdr->lah_advise;
	ladvise->lla_start = lio->li_start;
	ladvise->lla_end = lio->li_end;
	ladvise->lla_advice = lio->li_advice;

	if (lio->li_flags & LF_ASYNC) {
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, NULL, NULL, NULL);
	} else {
		init_completion(&cbargs->opc_sync);
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, osc_async_upcall,
					  cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}
	RETURN(result);
}
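In the synchronous branch above, cbargs->opc_rpc_sent records whether a wait
on opc_sync is pending. A minimal sketch of how a matching end handler might
consume it (an illustration only, assuming the usual opc_sync/opc_rc fields
of struct osc_async_cbargs; not the actual handler):

static void osc_io_ladvise_end_sketch(const struct lu_env *env,
				      struct osc_async_cbargs *cbargs,
				      int *result)
{
	if (cbargs->opc_rpc_sent) {
		/* Wait until osc_async_upcall() completes opc_sync,
		 * then pick up the RPC return code. */
		wait_for_completion(&cbargs->opc_sync);
		*result = cbargs->opc_rc;
	}
}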
Example #2
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, unsigned to)
{
	struct lov_oinfo  *loi  = cl2osc(obj)->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *	 ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	valid = 0;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
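As a worked example (assuming a 4 KiB page size): touching page index 2 up to
byte 100 within that page gives kms = 2 * 4096 + 100 = 8292, so the stripe
KMS, and the size if needed, are raised to 8292 only if they were smaller.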
Example #3
/**
 * Discard pages protected by the given lock. This function traverses the
 * radix tree to find all covered pages and discards them. A page that is
 * also covered by another lock remains in cache.
 *
 * If an error happens at any step, the process continues anyway (the
 * reasoning behind this being that lock cancellation cannot be delayed
 * indefinitely).
 */
static int mdc_lock_discard_pages(const struct lu_env *env,
				  struct osc_object *osc,
				  pgoff_t start, pgoff_t end,
				  bool discard)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct cl_io *io = &info->oti_io;
	osc_page_gang_cbt cb;
	int res;
	int result;

	ENTRY;

	io->ci_obj = cl_object_top(osc2cl(osc));
	io->ci_ignore_layout = 1;
	result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (result != 0)
		GOTO(out, result);

	cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
	info->oti_fn_index = info->oti_next_index = start;
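	/* Repeat the gang lookup until the whole range has been processed,
	 * yielding the CPU whenever the lookup asks for a reschedule. */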
	do {
		res = osc_page_gang_lookup(env, io, osc, info->oti_next_index,
					   end, cb, (void *)osc);
		if (info->oti_next_index > end)
			break;

		if (res == CLP_GANG_RESCHED)
			cond_resched();
	} while (res != CLP_GANG_OKAY);
out:
	cl_io_fini(env, io);
	RETURN(result);
}
Example #4
/**
 * Check whether page @page is covered by an additional lock; if not,
 * discard it.
 */
static int mdc_check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
				    struct osc_page *ops, void *cbdata)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct osc_object *osc = cbdata;
	pgoff_t index;

	index = osc_index(ops);
	if (index >= info->oti_fn_index) {
		struct ldlm_lock *tmp;
		struct cl_page *page = ops->ops_cl.cpl_page;

		/* refresh non-overlapped index */
		tmp = mdc_dlmlock_at_pgoff(env, osc, index,
					   OSC_DAP_FL_TEST_LOCK);
		if (tmp != NULL) {
			info->oti_fn_index = CL_PAGE_EOF;
			LDLM_LOCK_PUT(tmp);
		} else if (cl_page_own(env, io, page) == 0) {
			/* discard the page */
			cl_page_discard(env, io, page);
			cl_page_disown(env, io, page);
		} else {
			LASSERT(page->cp_state == CPS_FREEING);
		}
	}

	info->oti_next_index = index + 1;
	return CLP_GANG_OKAY;
}
Example #5
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when the lock is the result
 * of a local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
				int rc)
{
	struct ost_lvb    *lvb;
	struct cl_object  *obj;
	struct lov_oinfo  *oinfo;
	struct cl_attr    *attr;
	unsigned	   valid;

	if (!(olck->ols_flags & LDLM_FL_LVB_READY))
		return;

	lvb   = &olck->ols_lvb;
	obj   = olck->ols_cl.cls_obj;
	oinfo = cl2osc(obj)->oo_oinfo;
	attr  = &osc_env_info(env)->oti_attr;
	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (rc == 0) {
		struct ldlm_lock  *dlmlock;
		__u64 size;

		dlmlock = olck->ols_lock;
		LASSERT(dlmlock != NULL);

		/* re-grab LVB from a dlm lock under DLM spin-locks. */
		*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
		size = lvb->lvb_size;
		/* Extend KMS up to the end of this lock and no further.
		 * A lock on [x, y] means a KMS of up to y + 1 bytes! */
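		/* e.g., a granted lock on bytes [0, 4095] allows the KMS to
		 * be raised to at most 4096. */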
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
				   ", kms="LPU64, lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   LPU64"; leaving kms="LPU64", end="LPU64,
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	} else if (rc == -ENAVAIL && olck->ols_glimpse) {
		CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
		       " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
	} else
		valid = 0;

	if (valid != 0)
		cl_object_attr_set(env, obj, attr, valid);

	cl_object_attr_unlock(obj);
}
Example #6
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	bool discard;
	enum cl_lock_mode mode = CLM_READ;

	ENTRY;

	LASSERT(flag == LDLM_CB_CANCELING);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		RETURN(0);
	}

	discard = ldlm_is_discard_data(dlmlock);
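	/* A PW or GROUP lock may cover dirty pages, so flush in write mode. */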
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data != NULL) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;
		cl_object_get(obj);
	}
	ldlm_set_kms_ignore(dlmlock);
	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed. */
	if (obj != NULL) {
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		/* Destroy pages covered by the extent of the DLM lock */
		result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
					CL_PAGE_EOF, mode, discard);
		/* Losing a lock, set KMS to 0.
		 * NB: assumes that the DOM lock covers the whole data on the
		 * MDT.
		 */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		attr->cat_kms = 0;
		cl_object_attr_update(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);
		cl_object_put(env, obj);
	}
	RETURN(result);
}
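mdc_lock_flush() is not shown here; a plausible sketch, assuming it mirrors
osc_lock_flush() by writing back dirty pages for write locks and then
discarding the range via mdc_lock_discard_pages() from Example #3 (the
writeback call and its arguments are an assumption):

static int mdc_lock_flush_sketch(const struct lu_env *env,
				 struct osc_object *obj, pgoff_t start,
				 pgoff_t end, enum cl_lock_mode mode,
				 bool discard)
{
	int result = 0;

	if (mode == CLM_WRITE) {
		/* Push dirty pages in [start, end] to the server first. */
		result = osc_cache_writeback_range(env, obj, start, end,
						   1, discard);
	}
	if (result == 0)
		result = mdc_lock_discard_pages(env, obj, start, end,
						discard);
	return result;
}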
Example #7
static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct osc_object  *osc     = cl2osc(obj);
	struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;

	/* DLM locks don't hold a reference on the osc_object, so we have to
	 * clear it before the object is destroyed. */
	ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
	ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
			      osc_object_ast_clear, osc);
    return 0;
}
Example #8
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when the lock is the result
 * of a local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env,
				struct osc_object *osc,
				struct ldlm_lock *dlmlock,
				struct ost_lvb *lvb)
{
	struct cl_object  *obj = osc2cl(osc);
	struct lov_oinfo  *oinfo = osc->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	unsigned           valid;

	ENTRY;

	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	if (lvb == NULL) {
		LASSERT(dlmlock != NULL);
		lvb = dlmlock->l_lvb_data;
	}
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (dlmlock != NULL) {
		__u64 size;

		check_res_locked(dlmlock->l_resource);

		LASSERT(lvb == dlmlock->l_lvb_data);
		size = lvb->lvb_size;

		/* Extend KMS up to the end of this lock and no further.
		 * A lock on [x, y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu"
				   ", kms=%llu", lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   "%llu; leaving kms=%llu, end=%llu",
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	}

	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	EXIT;
}
Example #9
static int osc_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *slice)
{
	struct cl_object *obj   = slice->cis_obj;
	struct cl_attr   *attr  = &osc_env_info(env)->oti_attr;
	int rc = 0;

	if (!slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = LTIME_S(CURRENT_TIME);
		rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}
	return rc;
}
Example #10
static int osc_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *slice)
{
	struct cl_object *obj   = slice->cis_obj;
	struct cl_attr   *attr  = &osc_env_info(env)->oti_attr;
	int rc = 0;

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
	cl_object_attr_lock(obj);
	attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
	rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
	cl_object_attr_unlock(obj);

	return rc;
}
Example #11
static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct osc_object       *osc = cl2osc(obj);
	struct ldlm_res_id      *resname = &osc_env_info(env)->oti_resname;

	LASSERTF(osc->oo_npages == 0,
		 DFID "still have %lu pages, obj: %p, osc: %p\n",
		 PFID(lu_object_fid(&obj->co_lu)), osc->oo_npages, obj, osc);

	/* DLM locks don't hold a reference on the osc_object, so we have to
	 * clear it before the object is destroyed. */
	ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
	ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
			      osc_object_ast_clear, osc);
	return 0;
}
Example #12
static int osc_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
	struct osc_io	 *oio  = cl2osc_io(env, slice);
	struct cl_object *obj  = slice->cis_obj;
	struct cl_attr	 *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;
	ENTRY;

	if (oio->oi_lockless == 0 && !slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = LTIME_S(CFS_CURRENT_TIME);
		rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}

	RETURN(rc);
}
Example #13
/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
	struct ldlm_lock *dlmlock;

	spin_lock(&osc_ast_guard);
	dlmlock = olck->ols_lock;
	if (dlmlock == NULL) {
		spin_unlock(&osc_ast_guard);
		return;
	}

	olck->ols_lock = NULL;
	/* wb(); --- for all who check (ols->ols_lock != NULL) before
	 * calling osc_lock_detach() */
	dlmlock->l_ast_data = NULL;
	olck->ols_handle.cookie = 0ULL;
	spin_unlock(&osc_ast_guard);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct cl_object *obj = olck->ols_cl.cls_obj;
		struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid possible races. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop all granted locks.
		 * Not a problem for the client */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
	}
	unlock_res_and_lock(dlmlock);

	/* release a reference taken in osc_lock_upcall0(). */
	LASSERT(olck->ols_has_ref);
	lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
	LDLM_LOCK_RELEASE(dlmlock);
	olck->ols_has_ref = 0;
}
Example #14
/**
 * Called when a lock is granted: either from an upcall (when the server
 * returned a granted lock) or from a completion AST (when the server
 * returned a blocked lock).
 *
 * Called under lock and resource spin-locks, which are released temporarily
 * here.
 */
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
			     struct ldlm_lock *dlmlock, int rc)
{
	struct ldlm_extent   *ext;
	struct cl_lock       *lock;
	struct cl_lock_descr *descr;

	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	ENTRY;
	if (olck->ols_state < OLS_GRANTED) {
		lock  = olck->ols_cl.cls_lock;
		ext   = &dlmlock->l_policy_data.l_extent;
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_obj = lock->cll_descr.cld_obj;

		/* XXX check that ->l_granted_mode is valid. */
		descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end   = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid   = ext->gid;
		/*
		 * tell upper layers the extent of the lock that was actually
		 * granted
		 */
		olck->ols_state = OLS_GRANTED;
		osc_lock_lvb_update(env, olck, rc);

		/* release DLM spin-locks to allow cl_lock_{modify,signal}()
		 * to take a semaphore on a parent lock. This is safe, because
		 * spin-locks are needed to protect consistency of
		 * dlmlock->l_*_mode and LVB, and we have finished processing
		 * them. */
		unlock_res_and_lock(dlmlock);
		cl_lock_modify(env, lock, descr);
		cl_lock_signal(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		lock_res_and_lock(dlmlock);
	}
	EXIT;
}
Example #15
/**
 * Finds an existing lock covering the page with the given index.
 * Copy of osc_obj_dlmlock_at_pgoff() but for the DoM IBITS lock.
 */
struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
				       struct osc_object *obj, pgoff_t index,
				       enum osc_dap_flags dap_flags)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct ldlm_res_id *resname = &info->oti_resname;
	union ldlm_policy_data *policy = &info->oti_policy;
	struct lustre_handle lockh;
	struct ldlm_lock *lock = NULL;
	enum ldlm_mode mode;
	__u64 flags;

	ENTRY;

	fid_build_reg_res_name(lu_object_fid(osc2lu(obj)), resname);
	mdc_lock_build_policy(env, policy);

	flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
	if (dap_flags & OSC_DAP_FL_TEST_LOCK)
		flags |= LDLM_FL_TEST_LOCK;

again:
	/* Next, search for already existing extent locks that will cover us */
	/* If we're trying to read, we also search for an existing PW lock.  The
	 * VFS and page cache already protect us locally, so lots of readers/
	 * writers can share a single PW lock. */
	mode = mdc_dom_lock_match(env, osc_export(obj), resname, LDLM_IBITS,
				  policy, LCK_PR | LCK_PW, &flags, obj, &lockh,
				  dap_flags & OSC_DAP_FL_CANCELING);
	if (mode != 0) {
		lock = ldlm_handle2lock(&lockh);
		/* RACE: the lock is cancelled so let's try again */
		if (unlikely(lock == NULL))
			goto again;
	}

	RETURN(lock);
}
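Note that a successful match returns a referenced lock even with
OSC_DAP_FL_TEST_LOCK; callers drop the reference with LDLM_LOCK_PUT(), as
mdc_check_and_discard_cb() does in Example #4.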
Example #16
static int osc_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
	struct osc_io    *oio    = cl2osc_io(env, slice);
	struct cl_object *obj    = slice->cis_obj;
	struct cl_attr   *attr   = &osc_env_info(env)->oti_attr;
	int               result = 0;
	ENTRY;

	if (oio->oi_lockless == 0) {
		OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
		cl_object_attr_lock(obj);
		result = cl_object_attr_get(env, obj, attr);
		if (result == 0) {
			attr->cat_mtime = attr->cat_ctime =
				LTIME_S(CFS_CURRENT_TIME);
			result = cl_object_attr_set(env, obj, attr,
						    CAT_MTIME | CAT_CTIME);
		}
		cl_object_attr_unlock(obj);
	}
	RETURN(result);
}
Example #17
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one was missing (i.e., if there was a hole at that place in
 * the file, or the accessed page is beyond the current file size).
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, size_t to)
{
	struct lov_oinfo  *loi  = cl2osc(obj)->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *	 ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
	valid = CAT_MTIME | CAT_CTIME;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
Example #18
/**
 * Implementation of struct cl_object_operations::coo_req_attr_set() for the
 * osc layer. osc is responsible for the struct obdo::o_id and struct
 * obdo::o_seq fields.
 */
static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                             struct cl_req_attr *attr)
{
	struct lov_oinfo *oinfo;
	struct obdo      *oa;
	struct ost_lvb   *lvb;
	u64               flags = attr->cra_flags;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb   = &oinfo->loi_lvb;
	oa    = attr->cra_oa;

	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		struct ldlm_lock *lock;
		struct osc_page *opg;

		opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
					    OSC_DAP_FL_TEST_LOCK |
					    OSC_DAP_FL_CANCELING);
		if (lock == NULL && !opg->ops_srvlock) {
			struct ldlm_resource *res;
			struct ldlm_res_id *resname;

			CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
				      "uncovered page!\n");

			resname = &osc_env_info(env)->oti_resname;
			ostid_build_res_name(&oinfo->loi_oi, resname);
			res = ldlm_resource_get(
				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
				NULL, resname, LDLM_EXTENT, 0);
			ldlm_resource_dump(D_ERROR, res);

			libcfs_debug_dumpstack(NULL);
			LBUG();
		}

		/* check for lockless io. */
		if (lock != NULL) {
			oa->o_handle = lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
			LDLM_LOCK_PUT(lock);
		}
	}
}
Example #19
static int osc_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	struct cl_io            *io     = slice->cis_io;
	struct osc_io           *oio    = cl2osc_io(env, slice);
	struct cl_object        *obj    = slice->cis_obj;
	struct lov_oinfo        *loi    = cl2osc(obj)->oo_oinfo;
	struct cl_attr          *attr   = &osc_env_info(env)->oti_attr;
	struct obdo             *oa     = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	__u64                    size   = io->u.ci_setattr.sa_attr.lvb_size;
	unsigned int             ia_valid = io->u.ci_setattr.sa_valid;
	int                      result = 0;
	struct obd_info          oinfo  = { { { 0 } } };

	/* truncate cache dirty pages first */
	if (cl_io_is_trunc(io))
		result = osc_cache_truncate_start(env, oio, cl2osc(obj), size);

	if (result == 0 && oio->oi_lockless == 0) {
		cl_object_attr_lock(obj);
		result = cl_object_attr_get(env, obj, attr);
		if (result == 0) {
			struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
			unsigned int cl_valid = 0;

			if (ia_valid & ATTR_SIZE) {
				attr->cat_size = attr->cat_kms = size;
				cl_valid = (CAT_SIZE | CAT_KMS);
			}
			if (ia_valid & ATTR_MTIME_SET) {
				attr->cat_mtime = lvb->lvb_mtime;
				cl_valid |= CAT_MTIME;
			}
			if (ia_valid & ATTR_ATIME_SET) {
				attr->cat_atime = lvb->lvb_atime;
				cl_valid |= CAT_ATIME;
			}
			if (ia_valid & ATTR_CTIME_SET) {
				attr->cat_ctime = lvb->lvb_ctime;
				cl_valid |= CAT_CTIME;
			}
			result = cl_object_attr_set(env, obj, attr, cl_valid);
		}
		cl_object_attr_unlock(obj);
	}
	memset(oa, 0, sizeof(*oa));
	if (result == 0) {
		oa->o_oi = loi->loi_oi;
		oa->o_mtime = attr->cat_mtime;
		oa->o_atime = attr->cat_atime;
		oa->o_ctime = attr->cat_ctime;
		oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
			OBD_MD_FLCTIME | OBD_MD_FLMTIME;
		if (ia_valid & ATTR_SIZE) {
			oa->o_size = size;
			oa->o_blocks = OBD_OBJECT_EOF;
			oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

			if (oio->oi_lockless) {
				oa->o_flags = OBD_FL_SRVLOCK;
				oa->o_valid |= OBD_MD_FLFLAGS;
			}
		} else {
			LASSERT(oio->oi_lockless == 0);
		}

		oinfo.oi_oa = oa;
		oinfo.oi_capa = io->u.ci_setattr.sa_capa;
		init_completion(&cbargs->opc_sync);

		if (ia_valid & ATTR_SIZE)
			result = osc_punch_base(osc_export(cl2osc(obj)),
						&oinfo, osc_async_upcall,
						cbargs, PTLRPCD_SET);
		else
			result = osc_setattr_async_base(osc_export(cl2osc(obj)),
							&oinfo, NULL,
							osc_async_upcall,
							cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}
	return result;
}
Example #20
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	int discard;
	enum cl_lock_mode mode = CLM_READ;

	LASSERT(flag == LDLM_CB_CANCELING);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		return 0;
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;

		cl_object_get(obj);
	}

	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed.
	 */
	if (obj) {
		struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		/* Destroy pages covered by the extent of the DLM lock */
		result = osc_lock_flush(cl2osc(obj),
					cl_index(obj, extent->start),
					cl_index(obj, extent->end),
					mode, discard);

		/* losing a lock, update kms */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid race. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop all granted locks.
		 * Not a problem for the client
		 */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);

		cl_object_put(env, obj);
	}
	return result;
}
Example #21
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct osc_lock *olck;
	struct cl_lock  *lock;
	int result;
	int cancel;

	LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);

	cancel = 0;
	olck = osc_ast_data_get(dlmlock);
	if (olck != NULL) {
		lock = olck->ols_cl.cls_lock;
		cl_lock_mutex_get(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		if (olck->ols_ast_wait) {
			/* wake up osc_lock_use() */
			cl_lock_signal(env, lock);
			olck->ols_ast_wait = 0;
		}
		/*
		 * The lock might have been canceled while this thread was
		 * sleeping on the lock mutex, but olck is pinned in memory.
		 */
		if (olck == dlmlock->l_ast_data) {
			/*
			 * NOTE: DLM sends blocking ASTs for failed locks
			 *       (that are still in pre-OLS_GRANTED state)
			 *       too, and they have to be canceled, otherwise
			 *       the DLM lock is never destroyed and gets
			 *       stuck in memory.
			 *
			 *       Alternatively, ldlm_cli_cancel() can be
			 *       called here directly for osc_locks with
			 *       ols_state < OLS_GRANTED, to maintain the
			 *       invariant that ->clo_cancel() is only called
			 *       for locks that were granted.
			 */
			LASSERT(data == olck);
			osc_lock_blocking(env, dlmlock,
					  olck, flag == LDLM_CB_BLOCKING);
		} else
			cancel = 1;
		cl_lock_mutex_put(env, lock);
		osc_ast_data_put(env, olck);
	} else
		/*
		 * A DLM lock exists, but there is no cl_lock attached to it.
		 * This is a `normal' race: cl_object and its cl_locks can be
		 * removed by memory pressure, together with all pages.
		 */
		cancel = (flag == LDLM_CB_BLOCKING);

	if (cancel) {
		struct lustre_handle *lockh;

		lockh = &osc_env_info(env)->oti_handle;
		ldlm_lock2handle(dlmlock, lockh);
		result = ldlm_cli_cancel(lockh, LCF_ASYNC);
	} else
		result = 0;
	return result;
}