Example #1
/**
 * Implementation of struct cl_object_operations::coo_req_attr_set() for the
 * osc layer. osc is responsible for the struct obdo::o_id and
 * struct obdo::o_seq fields.
 */
static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                             struct cl_req_attr *attr)
{
    struct lov_oinfo *oinfo;
    struct obdo      *oa;
    struct ost_lvb   *lvb;
    u64               flags = attr->cra_flags;

    oinfo   = cl2osc(obj)->oo_oinfo;
    lvb     = &oinfo->loi_lvb;
    oa      = attr->cra_oa;

    if ((flags & OBD_MD_FLMTIME) != 0) {
        oa->o_mtime = lvb->lvb_mtime;
        oa->o_valid |= OBD_MD_FLMTIME;
    }
    if ((flags & OBD_MD_FLATIME) != 0) {
        oa->o_atime = lvb->lvb_atime;
        oa->o_valid |= OBD_MD_FLATIME;
    }
    if ((flags & OBD_MD_FLCTIME) != 0) {
        oa->o_ctime = lvb->lvb_ctime;
        oa->o_valid |= OBD_MD_FLCTIME;
    }
    if (flags & OBD_MD_FLGROUP) {
        ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
        oa->o_valid |= OBD_MD_FLGROUP;
    }
    if (flags & OBD_MD_FLID) {
        ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
        oa->o_valid |= OBD_MD_FLID;
    }
    if (flags & OBD_MD_FLHANDLE) {
        struct ldlm_lock *lock;
        struct osc_page *opg;

        opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
        lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
                                    OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
        if (lock == NULL && !opg->ops_srvlock) {
            struct ldlm_resource *res;
            struct ldlm_res_id *resname;

            CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
                          "uncovered page!\n");

            resname = &osc_env_info(env)->oti_resname;
            ostid_build_res_name(&oinfo->loi_oi, resname);
            res = ldlm_resource_get(
                      osc_export(cl2osc(obj))->exp_obd->obd_namespace,
                      NULL, resname, LDLM_EXTENT, 0);
            ldlm_resource_dump(D_ERROR, res);

            libcfs_debug_dumpstack(NULL);
            LBUG();
        }

        /* check for lockless io. */
        if (lock != NULL) {
            oa->o_handle = lock->l_remote_handle;
            oa->o_valid |= OBD_MD_FLHANDLE;
            LDLM_LOCK_PUT(lock);
        }
    }
}
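
The function above is a straight application of the OBD_MD_* request/valid
bitmask pattern: the caller asks for attributes via cra_flags, osc copies only
the requested fields out of the cached ost_lvb, and records what it actually
filled in oa->o_valid. Below is a minimal, self-contained sketch of that
pattern; all names in it (attr_cache, out_rec, ATTR_*) are hypothetical
stand-ins, and only the flag handling mirrors osc_req_attr_set().

#include <stdint.h>
#include <stdio.h>

#define ATTR_MTIME (1u << 0)
#define ATTR_ATIME (1u << 1)
#define ATTR_CTIME (1u << 2)

struct attr_cache { uint64_t mtime, atime, ctime; };
struct out_rec    { uint64_t mtime, atime, ctime; uint32_t valid; };

/* Copy only the attributes requested in @flags and mark them valid. */
static void attr_set(const struct attr_cache *cache, struct out_rec *out,
		     uint32_t flags)
{
	if (flags & ATTR_MTIME) {
		out->mtime = cache->mtime;
		out->valid |= ATTR_MTIME;
	}
	if (flags & ATTR_ATIME) {
		out->atime = cache->atime;
		out->valid |= ATTR_ATIME;
	}
	if (flags & ATTR_CTIME) {
		out->ctime = cache->ctime;
		out->valid |= ATTR_CTIME;
	}
}

int main(void)
{
	struct attr_cache cache = { .mtime = 100, .atime = 200, .ctime = 300 };
	struct out_rec out = { 0 };

	/* Ask for mtime and ctime only; atime stays unset and unmarked. */
	attr_set(&cache, &out, ATTR_MTIME | ATTR_CTIME);
	printf("valid=0x%x mtime=%llu ctime=%llu\n", (unsigned)out.valid,
	       (unsigned long long)out.mtime, (unsigned long long)out.ctime);
	return 0;
}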
Example #2
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the osc
 * layer. osc is responsible for the struct obdo::o_id and
 * struct obdo::o_seq fields.
 */
static void osc_req_attr_set(const struct lu_env *env,
			     const struct cl_req_slice *slice,
			     const struct cl_object *obj,
			     struct cl_req_attr *attr, u64 flags)
{
	struct lov_oinfo *oinfo;
	struct cl_req    *clerq;
	struct cl_page   *apage; /* _some_ page in @clerq */
	struct cl_lock   *lock;  /* _some_ lock protecting @apage */
	struct osc_lock  *olck;
	struct osc_page  *opg;
	struct obdo      *oa;
	struct ost_lvb   *lvb;

	oinfo	= cl2osc(obj)->oo_oinfo;
	lvb	= &oinfo->loi_lvb;
	oa	= attr->cra_oa;

	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		clerq = slice->crs_req;
		LASSERT(!list_empty(&clerq->crq_pages));
		apage = container_of(clerq->crq_pages.next,
				     struct cl_page, cp_flight);
		opg = osc_cl_page_osc(apage);
		apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
		lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
		if (lock == NULL) {
			struct cl_object_header *head;
			struct cl_lock	  *scan;

			head = cl_object_header(apage->cp_obj);
			list_for_each_entry(scan, &head->coh_locks,
						cll_linkage)
				CL_LOCK_DEBUG(D_ERROR, env, scan,
					      "no cover page!\n");
			CL_PAGE_DEBUG(D_ERROR, env, apage,
				      "dump uncover page!\n");
			dump_stack();
			LBUG();
		}

		olck = osc_lock_at(lock);
		LASSERT(olck != NULL);
		LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
		/* check for lockless io. */
		if (olck->ols_lock != NULL) {
			oa->o_handle = olck->ols_lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
		}
		cl_lock_put(env, lock);
	}
}
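
Example #2 is an older variant of the same attribute hook: it has to find a
covering lock itself, and to do so it recovers the first in-flight cl_page
from the request's crq_pages list with container_of(). The following
self-contained sketch shows just that list/container_of() step; list_head is a
two-pointer stub and page_rec/flight are hypothetical names, while the
container_of() definition matches the usual kernel one.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct page_rec {
	int index;			/* payload */
	struct list_head flight;	/* linkage into an in-flight queue */
};

int main(void)
{
	struct page_rec pg = { .index = 42 };
	struct list_head queue;

	/* Single-entry circular list: queue <-> pg.flight. */
	queue.next = &pg.flight;
	queue.prev = &pg.flight;
	pg.flight.next = &queue;
	pg.flight.prev = &queue;

	/* queue.next points at the embedded list_head; step back to the
	 * enclosing struct, as osc_req_attr_set() does with crq_pages.next. */
	struct page_rec *first =
		container_of(queue.next, struct page_rec, flight);

	printf("first page index = %d\n", first->index);
	return 0;
}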
Example #3
/**
 * An implementation of the cl_io_operations::cio_io_submit() method for the
 * osc layer. Iterates over pages in the in-queue, prepares each for io by
 * calling cl_page_prep(), submits it through osc_page_submit(), and flushes
 * the accumulated pages to the server via osc_queue_sync_pages() in batches
 * of at most cl_max_pages_per_rpc pages.
 */
static int osc_io_submit(const struct lu_env *env,
			 const struct cl_io_slice *ios,
			 enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page    *page;
	struct cl_page    *tmp;
	struct client_obd *cli  = NULL;
	struct osc_object *osc  = NULL; /* to keep gcc happy */
	struct osc_page   *opg;
	struct cl_io      *io;
	LIST_HEAD(list);

	struct cl_page_list *qin      = &queue->c2_qin;
	struct cl_page_list *qout     = &queue->c2_qout;
	int queued = 0;
	int result = 0;
	int cmd;
	int brw_flags;
	int max_pages;

	LASSERT(qin->pl_nr > 0);

	CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);

	osc = cl2osc(ios->cis_obj);
	cli = osc_cli(osc);
	max_pages = cli->cl_max_pages_per_rpc;

	cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;

	/*
	 * NOTE: here @page is a top-level page. This is done to avoid
	 *       creation of sub-page-list.
	 */
	cl_page_list_for_each_safe(page, tmp, qin) {
		struct osc_async_page *oap;

		/* Top level IO. */
		io = page->cp_owner;
		LASSERT(io != NULL);

		opg = osc_cl_page_osc(page);
		oap = &opg->ops_oap;
		LASSERT(osc == oap->oap_obj);

		if (!list_empty(&oap->oap_pending_item) ||
		    !list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		result = cl_page_prep(env, io, page, crt);
		if (result != 0) {
			LASSERT(result < 0);
			if (result != -EALREADY)
				break;
			/*
			 * Handle -EALREADY error: for read case, the page is
			 * already in UPTODATE state; for write, the page
			 * is not dirty.
			 */
			result = 0;
			continue;
		}

		cl_page_list_move(qout, qin, page);
		oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
		oap->oap_async_flags |= ASYNC_COUNT_STABLE;

		osc_page_submit(env, opg, crt, brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);
		if (++queued == max_pages) {
			queued = 0;
			result = osc_queue_sync_pages(env, osc, &list, cmd,
						      brw_flags);
			if (result < 0)
				break;
		}
	}

	if (queued > 0)
		result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);

	CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
	return qout->pl_nr > 0 ? 0 : result;
}
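
The submit loop above also shows a generic batch-and-flush shape: pages are
accumulated on a local list and pushed out in chunks of cl_max_pages_per_rpc,
with a final flush for the remainder after the loop. A minimal sketch of that
control flow follows; flush_batch(), MAX_BATCH and the plain int items are
hypothetical stand-ins for osc_queue_sync_pages() and its page list.

#include <stdio.h>

#define MAX_BATCH 4

static int flush_batch(const int *items, int n)
{
	printf("flushing %d item(s):", n);
	for (int i = 0; i < n; i++)
		printf(" %d", items[i]);
	printf("\n");
	return 0; /* 0 on success, negative on error, like the kernel code */
}

int main(void)
{
	int batch[MAX_BATCH];
	int queued = 0;
	int result = 0;

	for (int item = 0; item < 10; item++) {
		batch[queued++] = item;
		if (queued == MAX_BATCH) {
			result = flush_batch(batch, queued);
			queued = 0;
			if (result < 0)
				break;
		}
	}

	/* Mirror of the trailing "if (queued > 0)" flush in osc_io_submit(). */
	if (queued > 0)
		result = flush_batch(batch, queued);

	return result < 0 ? 1 : 0;
}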
Example #4
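/**
 * Implementation of cl_io_operations::cio_commit_async() for the osc layer:
 * clips partial first and last pages when the io is lockless, adds each page
 * in @qin to the dirty cache (unless it is already there), and hands it over
 * to the commit callback @cb.
 */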
static int osc_io_commit_async(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct cl_page_list *qin, int from, int to,
				cl_commit_cbt cb)
{
	struct cl_io    *io = ios->cis_io;
	struct osc_io   *oio = cl2osc_io(env, ios);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct cl_page  *page;
	struct cl_page  *last_page;
	struct osc_page *opg;
	int result = 0;
	ENTRY;

	LASSERT(qin->pl_nr > 0);

	/* Handle partial page cases */
	last_page = cl_page_list_last(qin);
	if (oio->oi_lockless) {
		page = cl_page_list_first(qin);
		if (page == last_page) {
			cl_page_clip(env, page, from, to);
		} else {
			if (from != 0)
				cl_page_clip(env, page, from, PAGE_SIZE);
			if (to != PAGE_SIZE)
				cl_page_clip(env, last_page, 0, to);
		}
	}

	while (qin->pl_nr > 0) {
		struct osc_async_page *oap;

		page = cl_page_list_first(qin);
		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		if (!list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		/* The page may be already in dirty cache. */
		if (list_empty(&oap->oap_pending_item)) {
			result = osc_page_cache_add(env, &opg->ops_cl, io);
			if (result != 0)
				break;
		}

		osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
				  page == last_page ? to : PAGE_SIZE);

		cl_page_list_del(env, qin, page);

		(*cb)(env, io, page);
		/* Can't access page any more. Page can be in transfer and
		 * complete at any time. */
	}

	/* For a sync write, the kernel will wait for this page to be flushed
	 * before osc_io_end() is called, so release it earlier.
	 * For mkwrite(), it is known that there are no further pages. */
	if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}

	CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
	RETURN(result);
}
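
The clipping logic at the top of osc_io_commit_async() encodes a common rule
for byte ranges spanning several pages: only the first page starts at @from,
only the last page ends at @to, and every interior page covers the whole of
PAGE_SIZE. A small sketch of that arithmetic, with clip() as a hypothetical
stand-in for cl_page_clip():

#include <stdio.h>

#define PAGE_SIZE 4096

static void clip(int page, int start, int end)
{
	printf("page %d: bytes [%d, %d)\n", page, start, end);
}

static void clip_range(int first, int last, int from, int to)
{
	if (first == last) {
		clip(first, from, to);	/* single page: both edges apply */
		return;
	}
	if (from != 0)
		clip(first, from, PAGE_SIZE);
	/* Interior pages need no clipping; printed here for illustration. */
	for (int p = first + 1; p < last; p++)
		clip(p, 0, PAGE_SIZE);
	if (to != PAGE_SIZE)
		clip(last, 0, to);
}

int main(void)
{
	/* Three pages: [100, 4096), [0, 4096), [0, 512). */
	clip_range(0, 2, 100, 512);
	return 0;
}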