/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 *
 * \param env  lu environment, used to reach the per-thread osc_env_info()
 *             scratch cl_attr.
 * \param olck the osc lock whose LVB has just arrived from the server.
 * \param rc   enqueue result: 0 on success; -ENAVAIL is treated specially
 *             for glimpse locks (see below).
 */
static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
				int rc)
{
	struct ost_lvb   *lvb;
	struct cl_object *obj;
	struct lov_oinfo *oinfo;
	struct cl_attr   *attr;
	unsigned          valid;

	/* Nothing to do until the server has actually filled in the LVB. */
	if (!(olck->ols_flags & LDLM_FL_LVB_READY))
		return;

	lvb   = &olck->ols_lvb;
	obj   = olck->ols_cl.cls_obj;
	oinfo = cl2osc(obj)->oo_oinfo;
	/* Per-thread scratch attr: safe because we run in this env's thread. */
	attr  = &osc_env_info(env)->oti_attr;
	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (rc == 0) {
		struct ldlm_lock *dlmlock;
		__u64 size;

		dlmlock = olck->ols_lock;
		LASSERT(dlmlock != NULL);

		/* re-grab LVB from a dlm lock under DLM spin-locks. */
		*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
		size = lvb->lvb_size;
		/* Extend KMS up to the end of this lock and no further
		 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			/* This lock pushes known-minimum-size forward. */
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
				   ", kms="LPU64, lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			/* Cached KMS already covers this lock's extent;
			 * leave it alone, only the other attributes change. */
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   LPU64"; leaving kms="LPU64", end="LPU64,
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		/* LVB is consistent now, allow ldlm_lock_match() to see it. */
		ldlm_lock_allow_match_locked(dlmlock);
	} else if (rc == -ENAVAIL && olck->ols_glimpse) {
		/* Glimpse raced with another client's lock: the size in the
		 * reply is still usable, but KMS must not be touched. */
		CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
		       " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
	} else
		/* Hard enqueue failure: do not trust the LVB at all. */
		valid = 0;

	if (valid != 0)
		cl_object_attr_set(env, obj, attr, valid);

	cl_object_attr_unlock(obj);
}
/**
 * cl_object_operations::coo_attr_get() method for the osc layer.
 *
 * Fills \a attr from the attributes cached in the object's lov_oinfo:
 * the last-known LVB plus the known-minimum-size, which is only
 * reported when it is marked valid.
 *
 * \retval 0 always; attribute retrieval from the cache cannot fail.
 */
static int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
			struct cl_attr *attr)
{
	struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;

	/* Copy size/blocks/timestamps from the cached lock value block. */
	cl_lvb2attr(attr, &oinfo->loi_lvb);

	/* KMS is meaningful only while marked valid; otherwise report 0. */
	if (oinfo->loi_kms_valid)
		attr->cat_kms = oinfo->loi_kms;
	else
		attr->cat_kms = 0;

	return 0;
}
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 *
 * \param env     lu environment, used to reach the per-thread osc_env_info()
 *                scratch cl_attr.
 * \param osc     object whose cached attributes are refreshed.
 * \param dlmlock DLM lock carrying the authoritative LVB, or NULL when only
 *                \a lvb is available (then KMS is left untouched).
 * \param lvb     LVB to apply; may be NULL, in which case it is taken from
 *                \a dlmlock (which must then be non-NULL).
 */
static void osc_lock_lvb_update(const struct lu_env *env,
				struct osc_object *osc,
				struct ldlm_lock *dlmlock,
				struct ost_lvb *lvb)
{
	struct cl_object *obj = osc2cl(osc);
	struct lov_oinfo *oinfo = osc->oo_oinfo;
	/* Per-thread scratch attr: safe because we run in this env's thread. */
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	unsigned valid;

	ENTRY;

	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	if (lvb == NULL) {
		/* Caller passed no explicit LVB: take it from the DLM lock. */
		LASSERT(dlmlock != NULL);
		lvb = dlmlock->l_lvb_data;
	}
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (dlmlock != NULL) {
		__u64 size;

		/* Resource must already be locked by the caller (see the
		 * "Called under lock and resource spin-locks" contract). */
		check_res_locked(dlmlock->l_resource);

		LASSERT(lvb == dlmlock->l_lvb_data);
		size = lvb->lvb_size;

		/* Extend KMS up to the end of this lock and no further
		 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			/* This lock pushes known-minimum-size forward. */
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu"
				   ", kms=%llu", lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			/* Cached KMS already covers this lock's extent;
			 * leave it alone, only the other attributes change. */
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   "%llu; leaving kms=%llu, end=%llu",
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		/* LVB is consistent now, allow ldlm_lock_match() to see it. */
		ldlm_lock_allow_match_locked(dlmlock);
	}

	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	EXIT;
}
/**
 * cl_object_operations::coo_attr_get() implementation for a RAID0-striped
 * lov object.
 *
 * Lazily (re)computes the object-wide attributes by merging the per-stripe
 * LVBs and KMS values, caching the result in lov_layout_raid0::lo_attr until
 * it is invalidated (lo_attr_valid cleared elsewhere), then merges the cached
 * values into \a attr.
 *
 * \retval 0 on success, negative errno from lov_merge_lvb_kms() otherwise.
 */
static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
			      struct cl_attr *attr)
{
	struct lov_object	*lov = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(lov);
	struct cl_attr		*lov_attr = &r0->lo_attr;
	int			 result = 0;

	/* this is called w/o holding type guard mutex, so it must be inside
	 * an on going IO otherwise lsm may be replaced.
	 * LU-2117: it turns out there exists one exception. For mmaped files,
	 * the lock of those files may be requested in the other file's IO
	 * context, and this function is called in ccc_lock_state(), it will
	 * hit this assertion.
	 * Anyway, it's still okay to call attr_get w/o type guard as layout
	 * can't go if locks exist. */
	/* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */

	if (!r0->lo_attr_valid) {
		/* Cache is stale: rebuild it from the stripes. */
		struct lov_stripe_md	*lsm = lov->lo_lsm;
		struct ost_lvb		*lvb = &lov_env_info(env)->lti_lvb;
		__u64			 kms = 0;

		memset(lvb, 0, sizeof(*lvb));
		/* XXX: timestamps can be negative by sanity:test_39m,
		 * how can it be? */
		/* Seed with LLONG_MIN so any real stripe timestamp wins
		 * the max-merge below. */
		lvb->lvb_atime = LLONG_MIN;
		lvb->lvb_ctime = LLONG_MIN;
		lvb->lvb_mtime = LLONG_MIN;

		/*
		 * XXX that should be replaced with a loop over sub-objects,
		 * doing cl_object_attr_get() on them. But for now, let's
		 * reuse old lov code.
		 */

		/*
		 * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
		 * happy. It's not needed, because new code uses
		 * ->coh_attr_guard spin-lock to protect consistency of
		 * sub-object attributes.
		 */
		lov_stripe_lock(lsm);
		result = lov_merge_lvb_kms(lsm, lvb, &kms);
		lov_stripe_unlock(lsm);
		if (result == 0) {
			cl_lvb2attr(lov_attr, lvb);
			lov_attr->cat_kms = kms;
			/* Mark the cache usable for subsequent calls. */
			r0->lo_attr_valid = 1;
		}
	}
	if (result == 0) { /* merge results */
		/* Size-like attributes are taken from the stripe merge;
		 * timestamps only move forward (max with caller's values). */
		attr->cat_blocks = lov_attr->cat_blocks;
		attr->cat_size = lov_attr->cat_size;
		attr->cat_kms = lov_attr->cat_kms;
		if (attr->cat_atime < lov_attr->cat_atime)
			attr->cat_atime = lov_attr->cat_atime;
		if (attr->cat_ctime < lov_attr->cat_ctime)
			attr->cat_ctime = lov_attr->cat_ctime;
		if (attr->cat_mtime < lov_attr->cat_mtime)
			attr->cat_mtime = lov_attr->cat_mtime;
	}
	return result;
}