Example #1
/**
 * Prepare partially written-to page for a write.
 */
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
    struct cl_attr *attr   = vvp_env_thread_attr(env);
    struct cl_object *obj  = io->ci_obj;
    struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
    loff_t          offset = cl_offset(obj, vvp_index(vpg));
    int             result;

    cl_object_attr_lock(obj);
    result = cl_object_attr_get(env, obj, attr);
    cl_object_attr_unlock(obj);
    if (result == 0) {
        /*
         * If we are writing to a new page, there is no need to read
         * old data.  The extent locking will have updated the KMS, and
         * for our purposes here we can treat it like i_size.
         */
        if (attr->cat_kms <= offset) {
            char *kaddr = ll_kmap_atomic(vpg->vpg_page, KM_USER0);

            memset(kaddr, 0, cl_page_size(obj));
            ll_kunmap_atomic(kaddr, KM_USER0);
        } else if (vpg->vpg_defer_uptodate)
            vpg->vpg_ra_used = 1;
        else
            result = ll_page_sync_io(env, io, pg, CRT_READ);
    }
    return result;
}
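
The key decision above: a partial write only needs the old page contents when the page starts below the known minimum size (KMS); a page lying entirely beyond KMS can simply be zero-filled. The following is a minimal userspace sketch of that decision, with invented names (prepare_partial_page, known_min_size, read_old_page); it illustrates the pattern only and is not the Lustre API.

/* Hypothetical userspace analogue of the zero-fill-vs-read choice. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* stand-in for a synchronous read of the existing page contents */
static int read_old_page(char *buf, long long offset)
{
    (void)offset;
    memset(buf, 'D', PAGE_SIZE);
    return 0;
}

static int prepare_partial_page(char *buf, long long page_offset,
                                long long known_min_size)
{
    if (known_min_size <= page_offset) {
        /* page lies entirely beyond known data: zero-fill, no I/O */
        memset(buf, 0, PAGE_SIZE);
        return 0;
    }
    /* page overlaps existing data: bring the old contents in first */
    return read_old_page(buf, page_offset);
}

int main(void)
{
    char page[PAGE_SIZE];

    prepare_partial_page(page, 2 * PAGE_SIZE, PAGE_SIZE);
    printf("beyond KMS: byte 0 = %d (zero-filled)\n", page[0]);

    prepare_partial_page(page, 0, PAGE_SIZE);
    printf("below KMS:  byte 0 = '%c' (read back)\n", page[0]);
    return 0;
}
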
Example #2
/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj, struct cl_page *pg,
                                  struct ccc_page *cp,
                                  unsigned from, unsigned to)
{
        struct cl_attr *attr   = ccc_env_thread_attr(env);
        loff_t          offset = cl_offset(obj, pg->cp_index);
        int             result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, there is no need to
                 * read old data.  The extent locking will have updated
                 * the KMS, and for our purposes here we can treat it
                 * like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);

                        memset(kaddr, 0, cl_page_size(obj));
                        ll_kunmap_atomic(kaddr, KM_USER0);
                } else if (cp->cpg_defer_uptodate)
                        cp->cpg_ra_used = 1;
                else
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
                /*
                 * In older implementations, obdo_refresh_inode was
                 * called here to update the inode, because the write
                 * might modify the object info at the OST.  However,
                 * this has proven useless, since the LVB functions
                 * will be called when a user space program tries to
                 * retrieve inode attributes.  Also, see bug 15909 for
                 * details. -jay
                if (result == 0)
                        cl_page_export(env, pg, 1);
        }
        return result;
}
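
Example #2 is an older variant of the same routine; the extra step is cl_page_export(), which publishes the page as uptodate once its contents are valid. Below is a minimal sketch of the full three-way branch plus that export step, using an invented struct page_state (defer_uptodate, ra_used, and uptodate are illustrative flags, not the Lustre structures):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page_state {
    char data[PAGE_SIZE];
    bool defer_uptodate;  /* readahead already filled the page */
    bool ra_used;         /* that readahead data was consumed */
    bool uptodate;        /* contents are now valid in memory */
};

/* stand-in for a synchronous read from storage */
static int sync_read(struct page_state *pg)
{
    memset(pg->data, 'D', PAGE_SIZE);
    return 0;
}

static int prepare_partial(struct page_state *pg, long long offset,
                           long long known_min_size)
{
    int result = 0;

    if (known_min_size <= offset)
        memset(pg->data, 0, PAGE_SIZE);  /* new page: zero-fill */
    else if (pg->defer_uptodate)
        pg->ra_used = true;              /* reuse readahead data */
    else
        result = sync_read(pg);          /* fall back to a read */

    if (result == 0)
        pg->uptodate = true;             /* analogue of cl_page_export() */
    return result;
}

int main(void)
{
    struct page_state pg = { .defer_uptodate = true };

    prepare_partial(&pg, 0, PAGE_SIZE);
    printf("ra_used=%d uptodate=%d\n", pg.ra_used, pg.uptodate);
    return 0;
}
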
Example #3
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                           int rw, struct inode *inode,
                           struct ll_dio_pages *pv)
{
    struct cl_page    *clp;
    struct cl_2queue  *queue;
    struct cl_object  *obj = io->ci_obj;
    int i;
    ssize_t rc = 0;
    loff_t file_offset  = pv->ldp_start_offset;
    size_t size         = pv->ldp_size;
    int page_count      = pv->ldp_nr;
    struct page **pages = pv->ldp_pages;
    size_t page_size    = cl_page_size(obj);
    bool do_io;
    int  io_pages       = 0;
    ENTRY;

    queue = &io->ci_queue;
    cl_2queue_init(queue);
    for (i = 0; i < page_count; i++) {
        if (pv->ldp_offsets)
            file_offset = pv->ldp_offsets[i];

        LASSERT(!(file_offset & (page_size - 1)));
        clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                           pv->ldp_pages[i], CPT_TRANSIENT);
        if (IS_ERR(clp)) {
            rc = PTR_ERR(clp);
            break;
        }

        rc = cl_page_own(env, io, clp);
        if (rc) {
            LASSERT(clp->cp_state == CPS_FREEING);
            cl_page_put(env, clp);
            break;
        }

        do_io = true;

        /* check the page type: if the page is a cached host page,
         * copy the data to or from it directly */
        if (clp->cp_type == CPT_CACHEABLE) {
            struct page *vmpage = cl_page_vmpage(clp);
            struct page *src_page;
            struct page *dst_page;
            void       *src;
            void       *dst;

            src_page = (rw == WRITE) ? pages[i] : vmpage;
            dst_page = (rw == WRITE) ? vmpage : pages[i];

            src = ll_kmap_atomic(src_page, KM_USER0);
            dst = ll_kmap_atomic(dst_page, KM_USER1);
            memcpy(dst, src, min(page_size, size));
            ll_kunmap_atomic(dst, KM_USER1);
            ll_kunmap_atomic(src, KM_USER0);

            /* make sure the page will be added to the transfer by
             * cl_io_submit()->...->vvp_page_prep_write(). */
            if (rw == WRITE)
                set_page_dirty(vmpage);

            if (rw == READ) {
                /* do not issue the page for read, since it may
                 * re-read a readahead page whose uptodate bit is
                 * not set. */
                cl_page_disown(env, io, clp);
                do_io = false;
            }
        }

        if (likely(do_io)) {
            cl_2queue_add(queue, clp);

            /*
             * Set the page clip to tell the transfer formation
             * engine that the page has to be sent even if it is
             * beyond KMS.
             */
            cl_page_clip(env, clp, 0, min(size, page_size));

            ++io_pages;
        }

        /* drop the reference taken by cl_page_find() */
        cl_page_put(env, clp);
        size -= page_size;
        file_offset += page_size;
    }

    if (rc == 0 && io_pages) {
        rc = cl_io_submit_sync(env, io,
                               rw == READ ? CRT_READ : CRT_WRITE,
                               queue, 0);
    }
    if (rc == 0)
        rc = pv->ldp_size;

    cl_2queue_discard(env, io, queue);
    cl_2queue_disown(env, io, queue);
    cl_2queue_fini(env, queue);
    RETURN(rc);
}
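
Two details in Example #3 are worth isolating: the source and destination pages swap roles depending on the transfer direction, and the final partial page is clipped to the bytes that remain. Here is a self-contained sketch of just that logic, with invented names (copy_page, OP_READ/OP_WRITE); the real code additionally goes through kmap_atomic and the cl_page machinery:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

enum rw_op { OP_READ, OP_WRITE };

/* WRITE: user page -> cached page; READ: cached page -> user page.
 * The copy length is clipped to min(PAGE_SIZE, remaining). */
static void copy_page(enum rw_op rw, char *user_page, char *cache_page,
                      size_t remaining)
{
    size_t n = remaining < PAGE_SIZE ? remaining : PAGE_SIZE;
    char *src = (rw == OP_WRITE) ? user_page : cache_page;
    char *dst = (rw == OP_WRITE) ? cache_page : user_page;

    memcpy(dst, src, n);
}

int main(void)
{
    char user[PAGE_SIZE]  = "user data";
    char cache[PAGE_SIZE] = "cached data";

    copy_page(OP_WRITE, user, cache, 9);   /* clipped final page */
    printf("cache after write: %.9s\n", cache);

    copy_page(OP_READ, user, cache, PAGE_SIZE);
    printf("user after read:   %.9s\n", user);
    return 0;
}
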