Example #1
/**
 * Prepare partially written-to page for a write.
 */
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
    struct cl_attr *attr   = vvp_env_thread_attr(env);
    struct cl_object *obj  = io->ci_obj;
    struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
    loff_t          offset = cl_offset(obj, vvp_index(vpg));
    int             result;

    cl_object_attr_lock(obj);
    result = cl_object_attr_get(env, obj, attr);
    cl_object_attr_unlock(obj);
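    /*
     * cat_kms is the file's "known minimum size"; comparing it with the
     * page offset decides between zero-filling and reading the page.
     */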
    if (result == 0) {
        /*
         * If we are writing to a new page, no need to read old data.
         * The extent locking will have updated the KMS, and for our
         * purposes here we can treat it like i_size.
         */
        if (attr->cat_kms <= offset) {
            char *kaddr = ll_kmap_atomic(vpg->vpg_page, KM_USER0);

            memset(kaddr, 0, cl_page_size(obj));
            ll_kunmap_atomic(kaddr, KM_USER0);
        } else if (vpg->vpg_defer_uptodate) {
            /* Read-ahead already filled this page; record that it was used. */
            vpg->vpg_ra_used = 1;
        } else {
            /* The page overlaps valid data: read it in synchronously
             * before allowing the partial write. */
            result = ll_page_sync_io(env, io, pg, CRT_READ);
        }
    }
    return result;
}
Example #2
int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *unused)
{
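	/* Locate this layer's slice inside the composite page and
	 * register its operations with it. */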
	struct lovsub_page *lsb = cl_object_page_slice(obj, page);

	cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
	return 0;
}
Example #3
int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, cfs_page_t *unused)
{
	struct lovsub_page *lsb = cl_object_page_slice(obj, page);
	ENTRY;

	cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
	RETURN(0);
}
Example #4
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
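	/*
	 * Attach the lov slice to the top-level page and link it to the
	 * page of the raid0 stripe that backs this file offset.
	 */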
	struct lov_object *loo = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(loo);
	struct lov_io     *lio = lov_env_io(env);
	struct cl_page    *subpage;
	struct cl_object  *subobj;
	struct lov_io_sub *sub;
	struct lov_page   *lpg = cl_object_page_slice(obj, page);
	loff_t             offset;
	u64                suboff;
	int                stripe;
	int                rc;

	/* Map the file-level offset to a stripe and the offset within it. */
	offset = cl_offset(obj, page->cp_index);
	stripe = lov_stripe_number(loo->lo_lsm, offset);
	LASSERT(stripe < r0->lo_nr);
	rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
	LASSERT(rc == 0);

	/* Mark the slice invalid until the sub-page is successfully linked. */
	lpg->lps_invalid = 1;
	cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);

	sub = lov_sub_get(env, lio, stripe);
	if (IS_ERR(sub)) {
		rc = PTR_ERR(sub);
		goto out;
	}

	/* Find or create the matching page in the stripe's sub-object. */
	subobj = lovsub2cl(r0->lo_sub[stripe]);
	subpage = cl_page_find_sub(sub->sub_env, subobj,
				   cl_index(subobj, suboff), vmpage, page);
	lov_sub_put(sub);
	if (IS_ERR(subpage)) {
		rc = PTR_ERR(subpage);
		goto out;
	}

	if (likely(subpage->cp_parent == page)) {
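		/* Sub-page is correctly parented to us: record the
		 * reference and mark the slice valid. */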
		lu_ref_add(&subpage->cp_reference, "lov", page);
		lpg->lps_invalid = 0;
		rc = 0;
	} else {
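		/* The sub-page belongs to some other parent; this should
		 * never happen, so dump both pages and assert. */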
		CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
		CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
		LASSERT(0);
	}

out:
	return rc;
}
Example #5
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
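	/*
	 * A file with no backing stripe objects reads as zeroes, so
	 * zero-fill the page and declare it up to date.
	 */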
	struct lov_page *lpg = cl_object_page_slice(obj, page);
	void *addr;

	cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
	addr = kmap(vmpage);
	memset(addr, 0, cl_page_size(obj));
	kunmap(vmpage);
	/* Announce the page as up to date to the VM layer. */
	cl_page_export(env, page, 1);
	return 0;
}
Example #6
static struct osc_page *osc_cl_page_osc(struct cl_page *page,
					struct osc_object *osc)
{
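	/*
	 * Resolve the osc-layer slice of a page: directly through the
	 * object when it is known, otherwise by device-type lookup.
	 */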
	const struct cl_page_slice *slice;

	if (osc != NULL)
		slice = cl_object_page_slice(&osc->oo_cl, page);
	else
		slice = cl_page_at(page, &osc_device_type);
	LASSERT(slice != NULL);

	return cl2osc_page(slice);
}
Example #7
static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
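	/*
	 * Only transient pages are expected here; a cacheable page
	 * indicates a bug (hence the LBUG below).
	 */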
	struct ccc_page *cpg = cl_object_page_slice(obj, page);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	cpg->cpg_page = vmpage;

	if (page->cp_type == CPT_CACHEABLE) {
		LBUG();
	} else {
		struct ccc_object *clobj = cl2ccc(obj);

		cl_page_slice_add(page, &cpg->cpg_cl, obj,
				&slp_transient_page_ops);
		clobj->cob_transient_pages++;
	}

	return 0;
}