Example #1
File: osc_lock.c  Project: AK101111/linux
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
			     struct lustre_handle *lockh, bool lvb_update)
{
	struct ldlm_lock *dlmlock;

	dlmlock = ldlm_handle2lock_long(lockh, 0);
	LASSERT(dlmlock);

	/* lock reference taken by ldlm_handle2lock_long() is
	 * owned by osc_lock and released in osc_lock_detach()
	 */
	lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
	oscl->ols_has_ref = 1;

	LASSERT(!oscl->ols_dlmlock);
	oscl->ols_dlmlock = dlmlock;

	/* This may be a matched lock for glimpse request, do not hold
	 * lock reference in that case.
	 */
	if (!oscl->ols_glimpse) {
		/* hold a refc for non glimpse lock which will
		 * be released in osc_lock_cancel()
		 */
		lustre_handle_copy(&oscl->ols_handle, lockh);
		ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
		oscl->ols_hold = 1;
	}

	/* Lock must have been granted. */
	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
		struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

		/* extend the lock extent, otherwise it will have problem when
		 * we decide whether to grant a lockless lock.
		 */
		descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid = ext->gid;

		/* no lvb update for matched lock */
		if (lvb_update) {
			LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
			osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
					    dlmlock, NULL);
		}
		LINVRNT(osc_lock_invariant(oscl));
	}
	unlock_res_and_lock(dlmlock);

	LASSERT(oscl->ols_state != OLS_GRANTED);
	oscl->ols_state = OLS_GRANTED;
}
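
Every example on this page converts between byte offsets and page indexes with cl_index()/cl_offset(). As a reading aid, here is a minimal sketch of that relationship; the demo_* helpers are hypothetical stand-ins, assuming the common CLIO case where the object's page size is the kernel page size, which is what makes cld_start/cld_end above simply the first and last page indexes covered by the granted LDLM extent.

static inline pgoff_t demo_cl_index(loff_t offset)
{
	/* byte offset -> index of the page containing it */
	return offset >> PAGE_SHIFT;
}

static inline loff_t demo_cl_offset(pgoff_t index)
{
	/* page index -> byte offset of its first byte */
	return (loff_t)index << PAGE_SHIFT;
}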
Example #2
static int osc_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct osc_object	*osc = cl2osc(ios->cis_obj);
	struct ldlm_lock	*dlmlock;
	int			result = -ENODATA;
	ENTRY;

	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
	if (dlmlock != NULL) {
		if (dlmlock->l_req_mode != LCK_PR) {
			struct lustre_handle lockh;
			ldlm_lock2handle(dlmlock, &lockh);
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
		}

		ra->cra_end = cl_index(osc2cl(osc),
				       dlmlock->l_policy_data.l_extent.end);
		ra->cra_release = osc_read_ahead_release;
		ra->cra_cbdata = dlmlock;
		result = 0;
	}

	RETURN(result);
}
Example #3
/**
 * Helper for mdc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	bool discard;
	enum cl_lock_mode mode = CLM_READ;

	ENTRY;

	LASSERT(flag == LDLM_CB_CANCELING);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		RETURN(0);
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data != NULL) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;
		cl_object_get(obj);
	}
	ldlm_set_kms_ignore(dlmlock);
	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed. */
	if (obj != NULL) {
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		/* Destroy pages covered by the extent of the DLM lock */
		result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
					CL_PAGE_EOF, mode, discard);
		/* Losing a lock, set KMS to 0.
		 * NB: assumed that DOM lock covers whole data on MDT.
		 */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		attr->cat_kms = 0;
		cl_object_attr_update(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);
		cl_object_put(env, obj);
	}
	RETURN(result);
}
Example #4
/**
 * Called when a lock is granted, from an upcall (when server returned a
 * granted lock), or from completion AST, when server returned a blocked lock.
 *
 * Called under lock and resource spin-locks, that are released temporarily
 * here.
 */
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
			     struct ldlm_lock *dlmlock, int rc)
{
	struct ldlm_extent   *ext;
	struct cl_lock       *lock;
	struct cl_lock_descr *descr;

	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	ENTRY;
	if (olck->ols_state < OLS_GRANTED) {
		lock  = olck->ols_cl.cls_lock;
		ext   = &dlmlock->l_policy_data.l_extent;
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_obj = lock->cll_descr.cld_obj;

		/* XXX check that ->l_granted_mode is valid. */
		descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end   = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid   = ext->gid;
		/*
		 * tell upper layers the extent of the lock that was actually
		 * granted
		 */
		olck->ols_state = OLS_GRANTED;
		osc_lock_lvb_update(env, olck, rc);

		/* release DLM spin-locks to allow cl_lock_{modify,signal}()
		 * to take a semaphore on a parent lock. This is safe, because
		 * spin-locks are needed to protect consistency of
		 * dlmlock->l_*_mode and LVB, and we have finished processing
		 * them. */
		unlock_res_and_lock(dlmlock);
		cl_lock_modify(env, lock, descr);
		cl_lock_signal(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		lock_res_and_lock(dlmlock);
	}
	EXIT;
}
Example #5
File: osc_io.c  Project: 383530895/linux
static void osc_io_fsync_end(const struct lu_env *env,
			     const struct cl_io_slice *slice)
{
	struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
	struct cl_object   *obj = slice->cis_obj;
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end   = cl_index(obj, fio->fi_end);
	int result = 0;

	if (fio->fi_mode == CL_FSYNC_LOCAL) {
		result = osc_cache_wait_range(env, cl2osc(obj), start, end);
	} else if (fio->fi_mode == CL_FSYNC_ALL) {
		struct osc_io	   *oio    = cl2osc_io(env, slice);
		struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

		wait_for_completion(&cbargs->opc_sync);
		if (result == 0)
			result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}
Example #6
File: lov_page.c  Project: Chong-Li/cse522
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
	struct lov_object *loo = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(loo);
	struct lov_io     *lio = lov_env_io(env);
	struct cl_page    *subpage;
	struct cl_object  *subobj;
	struct lov_io_sub *sub;
	struct lov_page   *lpg = cl_object_page_slice(obj, page);
	loff_t	     offset;
	u64	    suboff;
	int		stripe;
	int		rc;

	offset = cl_offset(obj, page->cp_index);
	stripe = lov_stripe_number(loo->lo_lsm, offset);
	LASSERT(stripe < r0->lo_nr);
	rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
				   &suboff);
	LASSERT(rc == 0);

	lpg->lps_invalid = 1;
	cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);

	sub = lov_sub_get(env, lio, stripe);
	if (IS_ERR(sub)) {
		rc = PTR_ERR(sub);
		goto out;
	}

	subobj = lovsub2cl(r0->lo_sub[stripe]);
	subpage = cl_page_find_sub(sub->sub_env, subobj,
				   cl_index(subobj, suboff), vmpage, page);
	lov_sub_put(sub);
	if (IS_ERR(subpage)) {
		rc = PTR_ERR(subpage);
		goto out;
	}

	if (likely(subpage->cp_parent == page)) {
		lu_ref_add(&subpage->cp_reference, "lov", page);
		lpg->lps_invalid = 0;
		rc = 0;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
		CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
		LASSERT(0);
	}

out:
	return rc;
}
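
The stripe arithmetic performed by lov_stripe_number()/lov_stripe_offset() before the final cl_index() conversion can be sketched as a plain RAID0 mapping. This is an illustrative model only (the demo_* names are hypothetical), assuming a simple layout with stripe size S and stripe count N:

static int demo_stripe_number(u64 off, u64 S, int N)
{
	/* chunks of S bytes are dealt round-robin across N stripes */
	return (off / S) % N;
}

static u64 demo_stripe_offset(u64 off, u64 S, int N)
{
	/* byte offset of the same data inside its stripe object */
	return (off / (S * N)) * S + (off % S);
}

The example then maps the stripe-local offset to a page index on the sub-object with cl_index(subobj, suboff) before looking up the sub-page.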
Example #7
static int osc_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *slice)
{
	struct cl_io       *io  = slice->cis_io;
	struct cl_fsync_io *fio = &io->u.ci_fsync;
	struct cl_object   *obj = slice->cis_obj;
	struct osc_object  *osc = cl2osc(obj);
	pgoff_t start  = cl_index(obj, fio->fi_start);
	pgoff_t end    = cl_index(obj, fio->fi_end);
	int     result = 0;
	ENTRY;

	if (fio->fi_end == OBD_OBJECT_EOF)
		end = CL_PAGE_EOF;

	result = osc_cache_writeback_range(env, osc, start, end, 0,
					   fio->fi_mode == CL_FSYNC_DISCARD);
	if (result > 0) {
		fio->fi_nr_written += result;
		result = 0;
	}
	if (fio->fi_mode == CL_FSYNC_ALL) {
		int rc;

		/* we have to wait for writeback to finish before we can
		 * send OST_SYNC RPC. This is bad because it causes extents
		 * to be written osc by osc. However, we usually start
		 * writeback before CL_FSYNC_ALL so this won't have any real
		 * problem. */
		rc = osc_cache_wait_range(env, osc, start, end);
		if (result == 0)
			result = rc;
		rc = osc_fsync_ost(env, osc, fio);
		if (result == 0)
			result = rc;
	}

	RETURN(result);
}
Example #8
File: osc_io.c  Project: 383530895/linux
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob;
	int     partial;
	pgoff_t start;

	clob    = oio->oi_cl.cis_obj;
	start   = cl_index(clob, size);
	partial = cl_offset(clob, start) < size;

	/*
	 * Complain if there are pages in the truncated region.
	 */
	cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
			    trunc_check_cb, (void *)&size);
}
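
The start + partial arithmetic is what excludes the page that still holds the new EOF: when the truncation size is not page aligned, that page legitimately stays in cache, so the gang lookup begins one index later. A worked example, assuming 4 KiB pages (numbers are illustrative only):

/*
 * size = 5000:
 *   start   = cl_index(clob, 5000)       -> 1
 *   partial = cl_offset(clob, 1) < 5000  -> 4096 < 5000 -> 1
 *   lookup scans [2, CL_PAGE_EOF]; page 1 holds the new EOF and may stay.
 *
 * size = 8192 (page aligned):
 *   start   = cl_index(clob, 8192)       -> 2
 *   partial = cl_offset(clob, 2) < 8192  -> 0
 *   lookup scans [2, CL_PAGE_EOF]; nothing at or past page 2 should remain.
 */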
Example #9
File: rw26.c  Project: rread/lustre
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                           int rw, struct inode *inode,
                           struct ll_dio_pages *pv)
{
    struct cl_page    *clp;
    struct cl_2queue  *queue;
    struct cl_object  *obj = io->ci_obj;
    int i;
    ssize_t rc = 0;
    loff_t file_offset  = pv->ldp_start_offset;
    size_t size         = pv->ldp_size;
    int page_count      = pv->ldp_nr;
    struct page **pages = pv->ldp_pages;
    size_t page_size    = cl_page_size(obj);
    bool do_io;
    int  io_pages       = 0;
    ENTRY;

    queue = &io->ci_queue;
    cl_2queue_init(queue);
    for (i = 0; i < page_count; i++) {
        if (pv->ldp_offsets)
            file_offset = pv->ldp_offsets[i];

        LASSERT(!(file_offset & (page_size - 1)));
        clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                           pv->ldp_pages[i], CPT_TRANSIENT);
        if (IS_ERR(clp)) {
            rc = PTR_ERR(clp);
            break;
        }

        rc = cl_page_own(env, io, clp);
        if (rc) {
            LASSERT(clp->cp_state == CPS_FREEING);
            cl_page_put(env, clp);
            break;
        }

        do_io = true;

        /* check the page type: if the page is a host page, then do
         * write directly */
        if (clp->cp_type == CPT_CACHEABLE) {
            struct page *vmpage = cl_page_vmpage(clp);
            struct page *src_page;
            struct page *dst_page;
            void       *src;
            void       *dst;

            src_page = (rw == WRITE) ? pages[i] : vmpage;
            dst_page = (rw == WRITE) ? vmpage : pages[i];

            src = ll_kmap_atomic(src_page, KM_USER0);
            dst = ll_kmap_atomic(dst_page, KM_USER1);
            memcpy(dst, src, min(page_size, size));
            ll_kunmap_atomic(dst, KM_USER1);
            ll_kunmap_atomic(src, KM_USER0);

            /* make sure page will be added to the transfer by
             * cl_io_submit()->...->vvp_page_prep_write(). */
            if (rw == WRITE)
                set_page_dirty(vmpage);

            if (rw == READ) {
                /* do not issue the page for read, since it may re-read
                 * a read-ahead page whose uptodate bit is not set. */
                cl_page_disown(env, io, clp);
                do_io = false;
            }
        }

        if (likely(do_io)) {
            cl_2queue_add(queue, clp);

            /*
             * Set page clip to tell transfer formation engine
             * that page has to be sent even if it is beyond KMS.
             */
            cl_page_clip(env, clp, 0, min(size, page_size));

            ++io_pages;
        }

        /* drop the reference count for cl_page_find */
        cl_page_put(env, clp);
        size -= page_size;
        file_offset += page_size;
    }

    if (rc == 0 && io_pages) {
        rc = cl_io_submit_sync(env, io,
                               rw == READ ? CRT_READ : CRT_WRITE,
                               queue, 0);
    }
    if (rc == 0)
        rc = pv->ldp_size;

    cl_2queue_discard(env, io, queue);
    cl_2queue_disown(env, io, queue);
    cl_2queue_fini(env, queue);
    RETURN(rc);
}
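
The cl_page_clip() call with min(size, page_size) only differs from a full-page clip on the final page of the transfer, where the user buffer does not fill a whole page. A worked example, assuming 4 KiB pages (illustrative numbers):

/*
 * ldp_size = 10000, page_size = 4096, page_count = 3:
 *   i = 0: clip [0, 4096), size -> 5904
 *   i = 1: clip [0, 4096), size -> 1808
 *   i = 2: clip [0, 1808)  -- only the valid tail of the last
 *          transient page is marked for transfer.
 */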
Example #10
static int llu_queue_pio(const struct lu_env *env, struct cl_io *io,
                         struct llu_io_group *group,
                         char *buf, size_t count, loff_t pos)
{
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct intnl_stat *st = llu_i2stat(inode);
        struct obd_export *exp = llu_i2obdexp(inode);
        struct page *page;
        int  rc = 0, ret_bytes = 0;
        struct cl_page *clp;
        struct cl_2queue *queue;
        ENTRY;

        if (!exp)
                RETURN(-EINVAL);

        queue = &io->ci_queue;
        cl_2queue_init(queue);


        /* prepare the pages array */
        do {
                unsigned long index, offset, bytes;

                offset = (pos & ~CFS_PAGE_MASK);
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                /* prevent read beyond file range */
                if (/* local_lock && */
                    io->ci_type == CIT_READ && pos + bytes >= st->st_size) {
                        if (pos >= st->st_size)
                                break;
                        bytes = st->st_size - pos;
                }

                /* prepare page for this index */
                page = llu_get_user_page(index, buf - offset, offset, bytes);
                if (!page) {
                        rc = -ENOMEM;
                        break;
                }

                clp = cl_page_find(env, obj,
                                   cl_index(obj, pos),
                                   page, CPT_TRANSIENT);

                if (IS_ERR(clp)) {
                        rc = PTR_ERR(clp);
                        break;
                }

                rc = cl_page_own(env, io, clp);
                if (rc) {
                        LASSERT(clp->cp_state == CPS_FREEING);
                        cl_page_put(env, clp);
                        break;
                }

                cl_2queue_add(queue, clp);

                /* drop the reference count for cl_page_find, so that the page
                 * will be freed in cl_2queue_fini. */
                cl_page_put(env, clp);

                cl_page_clip(env, clp, offset, offset+bytes);

                count -= bytes;
                pos += bytes;
                buf += bytes;

                group->lig_rwcount += bytes;
                ret_bytes += bytes;
                page++;
        } while (count);

        if (rc == 0) {
                enum cl_req_type iot;
                iot = io->ci_type == CIT_READ ? CRT_READ : CRT_WRITE;
		rc = cl_io_submit_sync(env, io, iot, queue, 0);
        }

        group->lig_rc = rc;

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);

        RETURN(ret_bytes);
}
Example #11
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
	struct vvp_io       *vio     = cl2vvp_io(env, ios);
	struct cl_io        *io      = ios->cis_io;
	struct cl_object    *obj     = io->ci_obj;
	struct inode        *inode   = ccc_object_inode(obj);
	struct cl_fault_io  *fio     = &io->u.ci_fault;
	struct vvp_fault_io *cfio    = &vio->u.fault;
	loff_t               offset;
	int                  result  = 0;
	struct page          *vmpage  = NULL;
	struct cl_page      *page;
	loff_t               size;
	pgoff_t              last; /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
        /* Though we already hold a cl_lock on this page, it can
         * still be truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop() and ll_fault() will catch
                 * and retry. */
                GOTO(out, result = +1);
        }


	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from above
		 * we want to make sure that we complete the mkwrite action
		 * while holding this lock. We need to make sure that we are
		 * not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
				"llite: mkwrite and truncate race happened: "
				"%p: 0x%lx 0x%lx\n",
				vmpage->mapping,fio->ft_index,last_index);
			/*
			 * We need to return if we are
			 * passed the end of the file. This will propagate
			 * up the call stack to ll_page_mkwrite where
			 * we will return VM_FAULT_NOPAGE. Any non-negative
			 * value returned here will be silently
			 * converted to 0. If the vmpage->mapping is null
			 * the error code would be converted back to ENODATA
			 * in ll_page_mkwrite0. Thus we return -ENODATA
			 * to handle both cases
			 */
			GOTO(out, result = -ENODATA);
		}
	}

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if page is going to be written, we should add this page into cache
         * earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				GOTO(out, result);
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of
	 * a mkwrite action. We need to check
	 * our assertions are correct, since
	 * we should have caught this above
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /*
                 * Last page is mapped partially.
                 */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;
        EXIT;

out:
	/* return unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}
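
Two of the cl_offset()/cl_index() uses here are easy to misread: the prep-size offset is the last byte of the faulting page, and ft_nob on the file's last page counts only the bytes actually backed by data. A quick check, assuming 4 KiB pages (illustrative numbers):

/*
 * ft_index = 3:
 *   offset = cl_offset(obj, 4) - 1        -> 16383 (last byte of page 3)
 *   cl_index(obj, 16383)                  -> 3 (the LASSERT holds)
 *
 * size = i_size_read(inode) = 14000:
 *   last = cl_index(obj, 13999)           -> 3, equal to ft_index, so
 *   ft_nob = 14000 - cl_offset(obj, 3)    -> 14000 - 12288 = 1712
 */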
Example #12
static int vvp_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io     *vio   = cl2vvp_io(env, ios);
        struct ccc_io     *cio   = cl2ccc_io(env, ios);
        struct cl_io      *io    = ios->cis_io;
        struct cl_object  *obj   = io->ci_obj;
        struct inode      *inode = ccc_object_inode(obj);
        struct ll_ra_read *bead  = &vio->cui_bead;
        struct file       *file  = cio->cui_fd->fd_file;

        int     result;
        loff_t  pos = io->u.ci_rd.rd.crw_pos;
        long    cnt = io->u.ci_rd.rd.crw_count;
        long    tot = cio->cui_tot_count;
        int     exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
        if (result != 0)
                return result;
        else if (exceed != 0)
                goto out;

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
                        "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                        inode->i_ino, cnt, pos, i_size_read(inode));

        /* turn off the kernel's read-ahead */
        cio->cui_fd->fd_file->f_ra.ra_pages = 0;

        /* initialize read-ahead window once per syscall */
        if (!vio->cui_ra_window_set) {
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
                ll_ra_read_in(file, bead);
        }

        /* BUG: 5972 */
        file_accessed(file);
        switch (vio->cui_io_subtype) {
        case IO_NORMAL:
                 result = lustre_generic_file_read(file, cio, &pos);
                 break;
        case IO_SPLICE:
                result = generic_file_splice_read(file, &pos,
                                vio->u.splice.cui_pipe, cnt,
                                vio->u.splice.cui_flags);
                /* LU-1109: do splice read stripe by stripe otherwise if it
                 * may make nfsd stuck if this read occupied all internal pipe
                 * buffers. */
                io->ci_continue = 0;
                break;
        default:
                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
                LBUG();
        }

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
				  pos, result, READ);
		result = 0;
	}

	return result;
}
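
The read-ahead window setup rounds the total request up to whole pages: adding PAGE_CACHE_SIZE - 1 before cl_index() is the usual ceiling division. A quick check, assuming 4 KiB pages (illustrative numbers):

/*
 * pos = 6000, tot = 10000:
 *   lrr_start = cl_index(obj, 6000)          -> 1
 *   lrr_count = cl_index(obj, 10000 + 4095)  -> 14095 >> 12 = 3
 * i.e. the window begins at page 1 and spans ceil(10000 / 4096) = 3 pages.
 */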
Example #13
static int vvp_mmap_locks(const struct lu_env *env,
                          struct ccc_io *vio, struct cl_io *io)
{
        struct ccc_thread_info *cti = ccc_env_info(env);
        struct mm_struct       *mm = current->mm;
        struct vm_area_struct  *vma;
        struct cl_lock_descr   *descr = &cti->cti_descr;
        ldlm_policy_data_t      policy;
        unsigned long           addr;
        unsigned long           seg;
        ssize_t                 count;
        int                     result;
        ENTRY;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (!cl_is_normalio(env, io))
                RETURN(0);

        if (vio->cui_iov == NULL) /* nfs or loop back device write */
                RETURN(0);

        /* No MM (e.g. NFS)? No vmas too. */
        if (mm == NULL)
                RETURN(0);

        for (seg = 0; seg < vio->cui_nrsegs; seg++) {
                const struct iovec *iv = &vio->cui_iov[seg];

                addr = (unsigned long)iv->iov_base;
                count = iv->iov_len;
                if (count == 0)
                        continue;

                count += addr & (~CFS_PAGE_MASK);
                addr &= CFS_PAGE_MASK;

                down_read(&mm->mmap_sem);
                while((vma = our_vma(mm, addr, count)) != NULL) {
                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
                        int flags = CEF_MUST;

                        if (ll_file_nolock(vma->vm_file)) {
                                /*
                                 * For no lock case, a lockless lock will be
                                 * generated.
                                 */
                                flags = CEF_NEVER;
                        }

                        /*
                         * XXX: Required lock mode can be weakened: CIT_WRITE
                         * io only ever reads user level buffer, and CIT_READ
                         * only writes on it.
                         */
                        policy_from_vma(&policy, vma, addr, count);
                        descr->cld_mode = vvp_mode_from_vma(vma);
                        descr->cld_obj = ll_i2info(inode)->lli_clob;
                        descr->cld_start = cl_index(descr->cld_obj,
                                                    policy.l_extent.start);
                        descr->cld_end = cl_index(descr->cld_obj,
                                                  policy.l_extent.end);
                        descr->cld_enq_flags = flags;
                        result = cl_io_lock_alloc_add(env, io, descr);

                        CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                               descr->cld_mode, descr->cld_start,
                               descr->cld_end);

                        if (result < 0)
                                RETURN(result);

                        if (vma->vm_end - addr >= count)
                                break;

                        count -= vma->vm_end - addr;
                        addr = vma->vm_end;
                }
                up_read(&mm->mmap_sem);
        }
        RETURN(0);
}
Example #14
File: lov_lock.c  Project: 513855417/linux
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
					  const struct cl_object *obj,
					  struct cl_lock *lock)
{
	int result = 0;
	int i;
	int nr;
	u64 start;
	u64 end;
	u64 file_start;
	u64 file_end;

	struct lov_object       *loo    = cl2lov(obj);
	struct lov_layout_raid0 *r0     = lov_r0(loo);
	struct lov_lock		*lovlck;

	file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
	file_end   = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;

	for (i = 0, nr = 0; i < r0->lo_nr; i++) {
		/*
		 * XXX for wide striping smarter algorithm is desirable,
		 * breaking out of the loop, early.
		 */
		if (likely(r0->lo_sub[i]) && /* spare layout */
		    lov_stripe_intersects(loo->lo_lsm, i,
					  file_start, file_end, &start, &end))
			nr++;
	}
	LASSERT(nr > 0);
	lovlck = libcfs_kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
				 GFP_NOFS);
	if (!lovlck)
		return ERR_PTR(-ENOMEM);

	lovlck->lls_nr = nr;
	for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
		if (likely(r0->lo_sub[i]) &&
		    lov_stripe_intersects(loo->lo_lsm, i,
					  file_start, file_end, &start, &end)) {
			struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
			struct cl_lock_descr *descr;

			descr = &lls->sub_lock.cll_descr;

			LASSERT(!descr->cld_obj);
			descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
			descr->cld_start = cl_index(descr->cld_obj, start);
			descr->cld_end   = cl_index(descr->cld_obj, end);
			descr->cld_mode  = lock->cll_descr.cld_mode;
			descr->cld_gid   = lock->cll_descr.cld_gid;
			descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
			lls->sub_stripe = i;

			/* initialize sub lock */
			result = lov_sublock_init(env, lock, lls);
			if (result < 0)
				break;

			lls->sub_initialized = 1;
			nr++;
		}
	}
	LASSERT(ergo(result == 0, nr == lovlck->lls_nr));

	if (result != 0) {
		for (i = 0; i < nr; ++i) {
			if (!lovlck->lls_sub[i].sub_initialized)
				break;

			cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
		}
		kvfree(lovlck);
		lovlck = ERR_PTR(result);
	}

	return lovlck;
}
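
Note how the inclusive page range [cld_start, cld_end] of the top lock is converted back to an inclusive byte range before the per-stripe intersection test: file_end is the last byte of page cld_end, i.e. cl_offset(cld_end + 1) - 1. A quick check, assuming 4 KiB pages (illustrative numbers):

/*
 * cld_start = 2, cld_end = 4:
 *   file_start = cl_offset(obj, 2)      -> 8192
 *   file_end   = cl_offset(obj, 5) - 1  -> 20479
 * so the byte extent covers pages 2..4 exactly, last byte included,
 * and each intersecting stripe gets its own [start, end] via cl_index().
 */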
Example #15
File: osc_lock.c  Project: AK101111/linux
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	int discard;
	enum cl_lock_mode mode = CLM_READ;

	LASSERT(flag == LDLM_CB_CANCELING);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		return 0;
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;

		cl_object_get(obj);
	}

	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed.
	 */
	if (obj) {
		struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		/* Destroy pages covered by the extent of the DLM lock */
		result = osc_lock_flush(cl2osc(obj),
					cl_index(obj, extent->start),
					cl_index(obj, extent->end),
					mode, discard);

		/* losing a lock, update kms */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid race. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop all granted locks.
		 * Not a problem for the client
		 */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);

		cl_object_put(env, obj);
	}
	return result;
}