/*
 * A sub-object's attributes changed: drop the cached merged RAID0
 * attributes on the parent (top-level striped) lov object so the next
 * attr_get recomputes them from the stripes.
 *
 * \retval 0 always; env/attr/valid are accepted for the cl_object_operations
 *	     signature but only the cache flag is touched here.
 */
static int lovsub_attr_update(const struct lu_env *env, struct cl_object *obj,
			      const struct cl_attr *attr, unsigned int valid)
{
	struct lov_object *parent = cl2lovsub(obj)->lso_super;

	lov_r0(parent)->lo_attr_valid = 0;
	return 0;
}
/*
 * Attributes were set on a sub-object: invalidate the merged attribute
 * cache held by the enclosing top-level lov object.
 *
 * \retval 0 always (via RETURN); attr/valid are unused — only the parent's
 *	    lo_attr_valid flag is cleared.
 */
static int lovsub_attr_set(const struct lu_env *env, struct cl_object *obj,
			   const struct cl_attr *attr, unsigned valid)
{
	struct lov_object *top = cl2lovsub(obj)->lso_super;

	ENTRY;
	lov_r0(top)->lo_attr_valid = 0;
	RETURN(0);
}
/*
 * Initialize the lov layer of a top-level cl_page for a RAID0-striped
 * object: locate the stripe backing the page's file offset, attach the
 * lov page slice, and create/find the corresponding sub-page on that
 * stripe's sub-object.
 *
 * \retval 0 on success, negative errno from lov_sub_get() or
 *	    cl_page_find_sub() on failure.
 */
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
	struct lov_object *loo = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(loo);
	struct lov_io *lio = lov_env_io(env);
	struct cl_page *subpage;
	struct cl_object *subobj;
	struct lov_io_sub *sub;
	struct lov_page *lpg = cl_object_page_slice(obj, page);
	loff_t offset;
	u64 suboff;
	int stripe;
	int rc;

	/* Map the page's file offset to a stripe number and the offset
	 * within that stripe. */
	offset = cl_offset(obj, page->cp_index);
	stripe = lov_stripe_number(loo->lo_lsm, offset);
	LASSERT(stripe < r0->lo_nr);
	rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
	LASSERT(rc == 0);

	/* Mark invalid until the sub-page is successfully linked below. */
	lpg->lps_invalid = 1;
	cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);

	sub = lov_sub_get(env, lio, stripe);
	if (IS_ERR(sub)) {
		rc = PTR_ERR(sub);
		goto out;
	}

	subobj = lovsub2cl(r0->lo_sub[stripe]);
	/* Find or create the child page on the stripe's sub-object,
	 * using the per-stripe IO environment. */
	subpage = cl_page_find_sub(sub->sub_env, subobj,
				   cl_index(subobj, suboff), vmpage, page);
	lov_sub_put(sub);
	if (IS_ERR(subpage)) {
		rc = PTR_ERR(subpage);
		goto out;
	}

	if (likely(subpage->cp_parent == page)) {
		/* Pin the child and mark the slice valid. */
		lu_ref_add(&subpage->cp_reference, "lov", page);
		lpg->lps_invalid = 0;
		rc = 0;
	} else {
		/* The sub-page is attached to a different parent: the
		 * parent/child invariant is broken — dump both and assert. */
		CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
		CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
		LASSERT(0);
	}
out:
	return rc;
}
/*
 * Run the given DLM lock iterator over every present RAID0 sub-object.
 * Missing stripes (sparse layout) are skipped.  Iteration stops at the
 * first sub-object for which cl_object_find_cbdata() returns non-zero,
 * and that value is propagated to the caller.
 *
 * \retval 0 if all sub-objects were visited without a match,
 *	   otherwise the first non-zero result.
 */
static int lov_find_cbdata_raid0(const struct lu_env *env,
				 struct cl_object *obj,
				 ldlm_iterator_t iter, void *data)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(lov);
	int rc = 0;
	int i;

	for (i = 0; i < r0->lo_nr; i++) {
		struct cl_object *subobj;

		if (r0->lo_sub[i] == NULL)
			continue;

		subobj = lovsub2cl(r0->lo_sub[i]);
		rc = cl_object_find_cbdata(env, subobj, iter, data);
		if (rc != 0)
			break;
	}
	return rc;
}
/*
 * Debug printer for a RAID0 lov object: emit a summary line for the
 * layout (stripe count, validity, lsm identity) followed by one entry
 * per stripe — either the sub-object's own printout or an "absent" note
 * for sparse slots.
 *
 * \retval 0 always.
 */
static int lov_print_raid0(const struct lu_env *env, void *cookie,
			   lu_printer_t p, const struct lu_object *o)
{
	struct lov_object *lov = lu2lov(o);
	struct lov_layout_raid0 *r0 = lov_r0(lov);
	struct lov_stripe_md *lsm = lov->lo_lsm;
	int i;

	(*p)(env, cookie, "stripes: %d, %svalid, lsm{%p 0x%08X %d %u %u}: \n",
	     r0->lo_nr, lov->lo_layout_invalid ? "in" : "", lsm,
	     lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
	     lsm->lsm_stripe_count, lsm->lsm_layout_gen);

	for (i = 0; i < r0->lo_nr; i++) {
		if (r0->lo_sub[i] != NULL) {
			struct lu_object *sub = lovsub2lu(r0->lo_sub[i]);

			lu_object_print(env, cookie, p, sub);
		} else {
			(*p)(env, cookie, "sub %d absent\n", i);
		}
	}
	return 0;
}
/*
 * Return the merged attributes of a RAID0 lov object.  If the cached
 * merged attributes (r0->lo_attr) are stale, recompute them by merging
 * per-stripe LVBs and KMS under the lsm stripe lock, then fold the
 * cached values into the caller-supplied \a attr (sizes/blocks/kms are
 * taken from the cache; timestamps are max-merged).
 *
 * \retval 0 on success, or the error from lov_merge_lvb_kms().
 */
static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
			      struct cl_attr *attr)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(lov);
	struct cl_attr *lov_attr = &r0->lo_attr;
	int result = 0;

	/* this is called w/o holding type guard mutex, so it must be inside
	 * an on going IO otherwise lsm may be replaced.
	 * LU-2117: it turns out there exists one exception. For mmaped files,
	 * the lock of those files may be requested in the other file's IO
	 * context, and this function is called in ccc_lock_state(), it will
	 * hit this assertion.
	 * Anyway, it's still okay to call attr_get w/o type guard as layout
	 * can't go if locks exist. */
	/* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */

	if (!r0->lo_attr_valid) {
		struct lov_stripe_md *lsm = lov->lo_lsm;
		struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
		__u64 kms = 0;

		memset(lvb, 0, sizeof(*lvb));
		/* XXX: timestamps can be negative by sanity:test_39m,
		 * how can it be? */
		/* Seed with LLONG_MIN so the max-merge in
		 * lov_merge_lvb_kms() picks up any stripe's timestamp. */
		lvb->lvb_atime = LLONG_MIN;
		lvb->lvb_ctime = LLONG_MIN;
		lvb->lvb_mtime = LLONG_MIN;

		/*
		 * XXX that should be replaced with a loop over sub-objects,
		 * doing cl_object_attr_get() on them. But for now, let's
		 * reuse old lov code.
		 */
		/*
		 * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
		 * happy. It's not needed, because new code uses
		 * ->coh_attr_guard spin-lock to protect consistency of
		 * sub-object attributes.
		 */
		lov_stripe_lock(lsm);
		result = lov_merge_lvb_kms(lsm, lvb, &kms);
		lov_stripe_unlock(lsm);
		if (result == 0) {
			cl_lvb2attr(lov_attr, lvb);
			lov_attr->cat_kms = kms;
			r0->lo_attr_valid = 1;
		}
	}
	if (result == 0) { /* merge results */
		attr->cat_blocks = lov_attr->cat_blocks;
		attr->cat_size = lov_attr->cat_size;
		attr->cat_kms = lov_attr->cat_kms;
		/* Timestamps: keep whichever of caller-provided or cached
		 * value is more recent. */
		if (attr->cat_atime < lov_attr->cat_atime)
			attr->cat_atime = lov_attr->cat_atime;
		if (attr->cat_ctime < lov_attr->cat_ctime)
			attr->cat_ctime = lov_attr->cat_ctime;
		if (attr->cat_mtime < lov_attr->cat_mtime)
			attr->cat_mtime = lov_attr->cat_mtime;
	}
	return result;
}
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 *
 * \retval the new lov_lock with lls_nr initialized sub-locks on success,
 *	   ERR_PTR(-ENOMEM) on allocation failure, or ERR_PTR(rc) if a
 *	   sub-lock failed to initialize (partially built sub-locks are
 *	   finalized before returning).
 */
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
					  const struct cl_object *obj,
					  struct cl_lock *lock)
{
	int result = 0;
	int i;
	int nr;
	u64 start;
	u64 end;
	u64 file_start;
	u64 file_end;
	struct lov_object *loo = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(loo);
	struct lov_lock *lovlck;

	/* Convert the top-lock's page-index extent into a byte extent. */
	file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
	file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;

	/* First pass: count stripes whose extent intersects the lock, so
	 * the lov_lock can be allocated with exactly that many sub-slots. */
	for (i = 0, nr = 0; i < r0->lo_nr; i++) {
		/*
		 * XXX for wide striping smarter algorithm is desirable,
		 * breaking out of the loop, early.
		 */
		if (likely(r0->lo_sub[i]) && /* spare layout */
		    lov_stripe_intersects(loo->lo_lsm, i, file_start,
					  file_end, &start, &end))
			nr++;
	}
	LASSERT(nr > 0);
	lovlck = libcfs_kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
				 GFP_NOFS);
	if (!lovlck)
		return ERR_PTR(-ENOMEM);

	lovlck->lls_nr = nr;
	/* Second pass: fill in a sub-lock descriptor for each intersecting
	 * stripe, translating the byte extent into the sub-object's own
	 * page indices. */
	for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
		if (likely(r0->lo_sub[i]) &&
		    lov_stripe_intersects(loo->lo_lsm, i, file_start,
					  file_end, &start, &end)) {
			struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
			struct cl_lock_descr *descr;

			descr = &lls->sub_lock.cll_descr;

			LASSERT(!descr->cld_obj);
			descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
			descr->cld_start = cl_index(descr->cld_obj, start);
			descr->cld_end = cl_index(descr->cld_obj, end);
			descr->cld_mode = lock->cll_descr.cld_mode;
			descr->cld_gid = lock->cll_descr.cld_gid;
			descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
			lls->sub_stripe = i;

			/* initialize sub lock */
			result = lov_sublock_init(env, lock, lls);
			if (result < 0)
				break;

			lls->sub_initialized = 1;
			nr++;
		}
	}
	/* Both passes must agree on the stripe count. */
	LASSERT(ergo(result == 0, nr == lovlck->lls_nr));

	if (result != 0) {
		/* Unwind: finalize only the sub-locks that were initialized
		 * before the failure, then free the whole lov_lock. */
		for (i = 0; i < nr; ++i) {
			if (!lovlck->lls_sub[i].sub_initialized)
				break;

			cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
		}
		kvfree(lovlck);
		lovlck = ERR_PTR(result);
	}
	return lovlck;
}