Example #1
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
			  struct cl_page *page, void *cbdata)
{
	const struct cl_page_slice *slice;
	struct osc_page *ops;
	struct osc_async_page *oap;
	__u64 start = *(__u64 *)cbdata;

	slice = cl_page_at(page, &osc_device_type);
	LASSERT(slice != NULL);
	ops = cl2osc_page(slice);
	oap = &ops->ops_oap;

	if ((oap->oap_cmd & OBD_BRW_WRITE) &&
	    !list_empty(&oap->oap_pending_item))
		CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
				start, current->comm);

	{
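		/* Additionally, report any page still locked at the VM level. */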
		struct page *vmpage = cl_page_vmpage(env, page);

		if (PageLocked(vmpage))
			CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
			       ops, page->cp_index,
			       (oap->oap_cmd & OBD_BRW_RWMASK));
	}

	return CLP_GANG_OKAY;
}
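
For context, a callback with this signature is typically handed to a gang-lookup helper that walks every cached page in the truncated range; that is what the CLP_GANG_OKAY return value above is for. A minimal sketch of such a caller follows, assuming the legacy cl_page_gang_lookup() interface; the osc_trunc_check() wrapper and its exact arguments are assumptions modelled on the same Lustre layer, not shown in this example.

/* Sketch only: walk the pages from the truncation point to EOF and run
 * trunc_check_cb() on each one. Assumes the legacy cl_page_gang_lookup()
 * API that matches the CLP_GANG_OKAY return value above. */
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob = oio->oi_cl.cis_obj;
	pgoff_t start = cl_index(clob, size);
	int partial = cl_offset(clob, start) < size;

	/* Complain about any page still cached in the truncated region. */
	cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
			    trunc_check_cb, (void *)&size);
}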
Example #2
File: rw26.c Project: rread/lustre
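/*
 * Transfer the user pages described by *pv to or from @inode, bypassing
 * the page cache where possible. Returns the number of bytes moved on
 * success, or a negative errno.
 */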
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                           int rw, struct inode *inode,
                           struct ll_dio_pages *pv)
{
    struct cl_page    *clp;
    struct cl_2queue  *queue;
    struct cl_object  *obj = io->ci_obj;
    int i;
    ssize_t rc = 0;
    loff_t file_offset  = pv->ldp_start_offset;
    size_t size         = pv->ldp_size;
    int page_count      = pv->ldp_nr;
    struct page **pages = pv->ldp_pages;
    size_t page_size    = cl_page_size(obj);
    bool do_io;
    int  io_pages       = 0;
    ENTRY;

    queue = &io->ci_queue;
    cl_2queue_init(queue);
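    /* For each page: wrap it in a transient cl_page, copy the data
     * through any existing cached (host) page, and queue whatever must
     * actually be transferred for one synchronous submission below. */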
    for (i = 0; i < page_count; i++) {
        if (pv->ldp_offsets)
            file_offset = pv->ldp_offsets[i];

        LASSERT(!(file_offset & (page_size - 1)));
        clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                           pv->ldp_pages[i], CPT_TRANSIENT);
        if (IS_ERR(clp)) {
            rc = PTR_ERR(clp);
            break;
        }

        rc = cl_page_own(env, io, clp);
        if (rc) {
            LASSERT(clp->cp_state == CPS_FREEING);
            cl_page_put(env, clp);
            break;
        }

        do_io = true;

        /* Check the page type: if the page is a host (cacheable) page,
         * copy the data through it directly. */
        if (clp->cp_type == CPT_CACHEABLE) {
            struct page *vmpage = cl_page_vmpage(clp);
            struct page *src_page;
            struct page *dst_page;
            void       *src;
            void       *dst;

            src_page = (rw == WRITE) ? pages[i] : vmpage;
            dst_page = (rw == WRITE) ? vmpage : pages[i];

            src = ll_kmap_atomic(src_page, KM_USER0);
            dst = ll_kmap_atomic(dst_page, KM_USER1);
            memcpy(dst, src, min(page_size, size));
            ll_kunmap_atomic(dst, KM_USER1);
            ll_kunmap_atomic(src, KM_USER0);

            /* make sure the page will be added to the transfer by
             * cl_io_submit()->...->vvp_page_prep_write(). */
            if (rw == WRITE)
                set_page_dirty(vmpage);

            if (rw == READ) {
                /* Do not issue the page for read, since it may
                 * re-read a readahead page whose uptodate bit is
                 * not yet set. */
                cl_page_disown(env, io, clp);
                do_io = false;
            }
        }

        if (likely(do_io)) {
            cl_2queue_add(queue, clp);

            /*
             * Set page clip to tell the transfer formation engine
             * that the page has to be sent even if it is beyond KMS.
             */
            cl_page_clip(env, clp, 0, min(size, page_size));

            ++io_pages;
        }

        /* drop the reference taken by cl_page_find() */
        cl_page_put(env, clp);
        size -= page_size;
        file_offset += page_size;
    }

    if (rc == 0 && io_pages) {
        rc = cl_io_submit_sync(env, io,
                               rw == READ ? CRT_READ : CRT_WRITE,
                               queue, 0);
    }
    if (rc == 0)
        rc = pv->ldp_size;

    cl_2queue_discard(env, io, queue);
    cl_2queue_disown(env, io, queue);
    cl_2queue_fini(env, queue);
    RETURN(rc);
}
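
A caller is expected to pin the user pages and describe them in a struct ll_dio_pages before invoking ll_direct_rw_pages(); in rw26.c that setup is done by the direct-I/O entry point. The sketch below is a hedged illustration using only the ldp_* fields visible above; the surrounding variables (env, io, inode, pages, nr, offset) are assumed to be provided by the caller.

/* Sketch only: submit 'nr' already-pinned, page-aligned user pages for
 * a direct write starting at byte 'offset'. Values are illustrative. */
struct ll_dio_pages pvec = {
    .ldp_pages        = pages,          /* pinned via get_user_pages() */
    .ldp_nr           = nr,             /* entries in 'pages' */
    .ldp_size         = nr * PAGE_SIZE, /* total bytes to transfer */
    .ldp_offsets      = NULL,           /* contiguous run: offsets derived */
    .ldp_start_offset = offset,         /* must be page aligned (LASSERT) */
};

ssize_t bytes = ll_direct_rw_pages(env, io, WRITE, inode, &pvec);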