void
vc4_job_reset(struct vc4_context *vc4)
{
        struct vc4_bo **referenced_bos = vc4->bo_pointers.base;
        for (int i = 0; i < cl_offset(&vc4->bo_handles) / 4; i++) {
                vc4_bo_unreference(&referenced_bos[i]);
        }
        vc4_reset_cl(&vc4->bcl);
        vc4_reset_cl(&vc4->shader_rec);
        vc4_reset_cl(&vc4->uniforms);
        vc4_reset_cl(&vc4->bo_handles);
        vc4_reset_cl(&vc4->bo_pointers);
        vc4->shader_rec_count = 0;

        vc4->needs_flush = false;
        vc4->draw_calls_queued = 0;

        /* We have no hardware context saved between our draw calls, so we
         * need to flag the next draw as needing all state emitted.  Emitting
         * all state at the start of our draws is also what ensures that we
         * return to the state we need after a previous tile has finished.
         */
        vc4->dirty = ~0;
        vc4->resolve = 0;
        vc4->cleared = 0;

        vc4->draw_min_x = ~0;
        vc4->draw_min_y = ~0;
        vc4->draw_max_x = 0;
        vc4->draw_max_y = 0;
}
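The vc4 examples on this page all use cl_offset() to measure how many bytes have been emitted into a command list (CL). A minimal sketch of the helper, assuming the struct vc4_cl layout from Mesa's vc4_cl.h (the real struct carries additional fields, e.g. for relocations):

/* Simplified sketch of Mesa's vc4_cl.h; fields trimmed for illustration. */
struct vc4_cl {
        void *base;               /* start of the growable CL buffer */
        struct vc4_cl_out *next;  /* current write pointer */
        uint32_t size;            /* allocated size in bytes */
};

/* Bytes emitted so far.  Dividing by 4, as the loops above and below do
 * for bo_handles, turns the offset into a count of 32-bit BO handles. */
static inline uint32_t
cl_offset(struct vc4_cl *cl)
{
        return (char *)cl->next - (char *)cl->base;
}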
Example #2
/**
 * Prepare partially written-to page for a write.
 */
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
    struct cl_attr *attr   = vvp_env_thread_attr(env);
    struct cl_object *obj  = io->ci_obj;
    struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
    loff_t          offset = cl_offset(obj, vvp_index(vpg));
    int             result;

    cl_object_attr_lock(obj);
    result = cl_object_attr_get(env, obj, attr);
    cl_object_attr_unlock(obj);
    if (result == 0) {
        /*
         * If we are writing to a new page, no need to read old data.
         * The extent locking will have updated the KMS, and for our
         * purposes here we can treat it like i_size.
         */
        if (attr->cat_kms <= offset) {
            char *kaddr = ll_kmap_atomic(vpg->vpg_page, KM_USER0);

            memset(kaddr, 0, cl_page_size(obj));
            ll_kunmap_atomic(kaddr, KM_USER0);
        } else if (vpg->vpg_defer_uptodate)
            vpg->vpg_ra_used = 1;
        else
            result = ll_page_sync_io(env, io, pg, CRT_READ);
    }
    return result;
}
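Note that the Lustre (CLIO) examples use a different cl_offset() from the vc4 one above: it converts a page index within a cl_object into a byte offset. A rough sketch of the helpers, assuming the staging-tree cl_object.h definitions (older trees spell the shift PAGE_CACHE_SHIFT):

/* Approximate CLIO helpers, shown only to make the conversions readable. */
static inline loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_SHIFT;       /* page index -> byte offset */
}

static inline pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_SHIFT;            /* byte offset -> page index */
}

static inline size_t cl_page_size(const struct cl_object *obj)
{
        return 1UL << PAGE_SHIFT;               /* bytes per page */
}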
Example #3
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, unsigned to)
{
	struct lov_oinfo  *loi  = cl2osc(obj)->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *	 ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	valid = 0;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
Example #4
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
	struct lov_object *loo = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(loo);
	struct lov_io     *lio = lov_env_io(env);
	struct cl_page    *subpage;
	struct cl_object  *subobj;
	struct lov_io_sub *sub;
	struct lov_page   *lpg = cl_object_page_slice(obj, page);
	loff_t	     offset;
	u64	    suboff;
	int		stripe;
	int		rc;

	offset = cl_offset(obj, page->cp_index);
	stripe = lov_stripe_number(loo->lo_lsm, offset);
	LASSERT(stripe < r0->lo_nr);
	rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
				   &suboff);
	LASSERT(rc == 0);

	lpg->lps_invalid = 1;
	cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);

	sub = lov_sub_get(env, lio, stripe);
	if (IS_ERR(sub)) {
		rc = PTR_ERR(sub);
		goto out;
	}

	subobj = lovsub2cl(r0->lo_sub[stripe]);
	subpage = cl_page_find_sub(sub->sub_env, subobj,
				   cl_index(subobj, suboff), vmpage, page);
	lov_sub_put(sub);
	if (IS_ERR(subpage)) {
		rc = PTR_ERR(subpage);
		goto out;
	}

	if (likely(subpage->cp_parent == page)) {
		lu_ref_add(&subpage->cp_reference, "lov", page);
		lpg->lps_invalid = 0;
		rc = 0;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
		CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
		LASSERT(0);
	}

out:
	return rc;
}
Example #5
void
cl_ensure_space(struct vc4_cl *cl, uint32_t space)
{
        uint32_t offset = cl_offset(cl);

        if (offset + space <= cl->size)
                return;

        uint32_t size = MAX2(cl->size + space, cl->size * 2);

        cl->base = reralloc(ralloc_parent(cl->base), cl->base, uint8_t, size);
        cl->size = size;
        cl->next = cl->base + offset;
}
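A typical caller grows the CL before a burst of emits and then writes through the cl_start()/cl_end() pair, as Example #9 below also shows. A hypothetical two-word emit, for illustration only (first_word/second_word are placeholders, not real packet contents):

        /* Hypothetical emit sequence; payload values are placeholders. */
        cl_ensure_space(&vc4->bcl, 2 * sizeof(uint32_t));

        struct vc4_cl_out *out = cl_start(&vc4->bcl);
        cl_u32(&out, first_word);
        cl_u32(&out, second_word);
        cl_end(&vc4->bcl, out);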
Example #6
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob;
	int     partial;
	pgoff_t start;

	clob    = oio->oi_cl.cis_obj;
	start   = cl_index(clob, size);
	partial = cl_offset(clob, start) < size;

	/*
	 * Complain if there are pages in the truncated region.
	 */
	cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
			    trunc_check_cb, (void *)&size);
}
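To make the start + partial arithmetic concrete (assuming 4 KiB pages): for size = 5000, start = cl_index(clob, 5000) = 1 and cl_offset(clob, 1) = 4096 < 5000, so partial = 1 and the gang lookup begins at page index 2, the first page lying entirely beyond the new size. For a page-aligned size such as 8192, partial = 0 and the lookup starts at page index 2 directly.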
Example #7
/**
 * Returns whether the current command lists reference the given BO.
 *
 * This helps avoid flushing the command buffers when unnecessary.
 */
bool
vc4_cl_references_bo(struct pipe_context *pctx, struct vc4_bo *bo,
                     bool include_reads)
{
    struct vc4_context *vc4 = vc4_context(pctx);

    if (!vc4->needs_flush)
        return false;

    /* Walk all the referenced BOs in the drawing command list to see if
     * they match.
     */
    if (include_reads) {
        struct vc4_bo **referenced_bos = vc4->bo_pointers.base;
        for (int i = 0; i < cl_offset(&vc4->bo_handles) / 4; i++) {
            if (referenced_bos[i] == bo) {
                return true;
            }
        }
    }

    /* Also check for the Z/color buffers, since the references to those
     * are only added immediately before submit.
     */
    struct vc4_surface *csurf = vc4_surface(vc4->framebuffer.cbufs[0]);
    if (csurf) {
        struct vc4_resource *ctex = vc4_resource(csurf->base.texture);
        if (ctex->bo == bo) {
            return true;
        }
    }

    struct vc4_surface *zsurf = vc4_surface(vc4->framebuffer.zsbuf);
    if (zsurf) {
        struct vc4_resource *ztex =
            vc4_resource(zsurf->base.texture);
        if (ztex->bo == bo) {
            return true;
        }
    }

    return false;
}
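A caller would typically use the return value to decide whether to flush before the CPU touches a BO. A hypothetical sketch (rsc stands in for some vc4_resource; the real call sites are not shown here):

    /* Flush only if the queued commands actually reference this BO. */
    if (vc4_cl_references_bo(pctx, rsc->bo, /* include_reads = */ true))
        pctx->flush(pctx, NULL, 0);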
Example #8
/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj, struct cl_page *pg,
                                  struct ccc_page *cp,
                                  unsigned from, unsigned to)
{
        struct cl_attr *attr   = ccc_env_thread_attr(env);
        loff_t          offset = cl_offset(obj, pg->cp_index);
        int             result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, no need to read old data.
                 * The extent locking will have updated the KMS, and for our
                 * purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);

                        memset(kaddr, 0, cl_page_size(obj));
                        ll_kunmap_atomic(kaddr, KM_USER0);
                } else if (cp->cpg_defer_uptodate)
                        cp->cpg_ra_used = 1;
                else
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
                /*
                 * In older implementations, obdo_refresh_inode is called here
                 * to update the inode because the write might modify the
                 * object info at OST. However, this has been proven useless,
                 * since LVB functions will be called when a user-space program
                 * tries to retrieve inode attributes.  Also, see bug 15909 for
                 * details. -jay
                 */
                if (result == 0)
                        cl_page_export(env, pg, 1);
        }
        return result;
}
Example #9
uint32_t
vc4_gem_hindex(struct vc4_context *vc4, struct vc4_bo *bo)
{
        uint32_t hindex;
        uint32_t *current_handles = vc4->bo_handles.base;

        for (hindex = 0; hindex < cl_offset(&vc4->bo_handles) / 4; hindex++) {
                if (current_handles[hindex] == bo->handle)
                        return hindex;
        }

        struct vc4_cl_out *out;

        out = cl_start(&vc4->bo_handles);
        cl_u32(&out, bo->handle);
        cl_end(&vc4->bo_handles, out);

        out = cl_start(&vc4->bo_pointers);
        cl_ptr(&out, vc4_bo_reference(bo));
        cl_end(&vc4->bo_pointers, out);

        return hindex;
}
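Roughly speaking, the index returned by vc4_gem_hindex() is what relocation entries in the binner CL store instead of raw BO pointers; at submit time the kernel resolves each index against the bo_handles array that Examples #11 and #12 pass in via submit.bo_handles / submit.bo_handle_count.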
Example #10
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one was missing (i.e., if there was a hole at that place in
 * the file, or the accessed page is beyond the current file size).
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, size_t to)
{
        struct lov_oinfo  *loi  = cl2osc(obj)->oo_oinfo;
        struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
        int valid;
        __u64 kms;

        /* offset within stripe */
        kms = cl_offset(obj, idx) + to;

        cl_object_attr_lock(obj);
        /*
         * XXX old code used
         *
         *         ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
         *
         * here
         */
        CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);

	attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
	valid = CAT_MTIME | CAT_CTIME;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
Example #11
/**
 * Submits the job to the kernel and then reinitializes it.
 */
void
vc4_job_submit(struct vc4_context *vc4)
{
        if (vc4_debug & VC4_DEBUG_CL) {
                fprintf(stderr, "BCL:\n");
                vc4_dump_cl(vc4->bcl.base, cl_offset(&vc4->bcl), false);
        }

        struct drm_vc4_submit_cl submit;
        memset(&submit, 0, sizeof(submit));

        cl_ensure_space(&vc4->bo_handles, 6 * sizeof(uint32_t));
        cl_ensure_space(&vc4->bo_pointers, 6 * sizeof(struct vc4_bo *));

        vc4_submit_setup_rcl_surface(vc4, &submit.color_read,
                                     vc4->color_read, false, false);
        vc4_submit_setup_rcl_render_config_surface(vc4, &submit.color_write,
                                                   vc4->color_write);
        vc4_submit_setup_rcl_surface(vc4, &submit.zs_read,
                                     vc4->zs_read, true, false);
        vc4_submit_setup_rcl_surface(vc4, &submit.zs_write,
                                     vc4->zs_write, true, true);

        vc4_submit_setup_rcl_msaa_surface(vc4, &submit.msaa_color_write,
                                          vc4->msaa_color_write);
        vc4_submit_setup_rcl_msaa_surface(vc4, &submit.msaa_zs_write,
                                          vc4->msaa_zs_write);

        if (vc4->msaa) {
                /* This bit controls how many pixels the general
                 * (i.e. subsampled) loads/stores are iterating over
                 * (multisample loads replicate out to the other samples).
                 */
                submit.color_write.bits |= VC4_RENDER_CONFIG_MS_MODE_4X;
                /* Controls whether color_write's
                 * VC4_PACKET_STORE_MS_TILE_BUFFER does 4x decimation
                 */
                submit.color_write.bits |= VC4_RENDER_CONFIG_DECIMATE_MODE_4X;
        }

        submit.bo_handles = (uintptr_t)vc4->bo_handles.base;
        submit.bo_handle_count = cl_offset(&vc4->bo_handles) / 4;
        submit.bin_cl = (uintptr_t)vc4->bcl.base;
        submit.bin_cl_size = cl_offset(&vc4->bcl);
        submit.shader_rec = (uintptr_t)vc4->shader_rec.base;
        submit.shader_rec_size = cl_offset(&vc4->shader_rec);
        submit.shader_rec_count = vc4->shader_rec_count;
        submit.uniforms = (uintptr_t)vc4->uniforms.base;
        submit.uniforms_size = cl_offset(&vc4->uniforms);

        assert(vc4->draw_min_x != ~0 && vc4->draw_min_y != ~0);
        submit.min_x_tile = vc4->draw_min_x / vc4->tile_width;
        submit.min_y_tile = vc4->draw_min_y / vc4->tile_height;
        submit.max_x_tile = (vc4->draw_max_x - 1) / vc4->tile_width;
        submit.max_y_tile = (vc4->draw_max_y - 1) / vc4->tile_height;
        submit.width = vc4->draw_width;
        submit.height = vc4->draw_height;
        if (vc4->cleared) {
                submit.flags |= VC4_SUBMIT_CL_USE_CLEAR_COLOR;
                submit.clear_color[0] = vc4->clear_color[0];
                submit.clear_color[1] = vc4->clear_color[1];
                submit.clear_z = vc4->clear_depth;
                submit.clear_s = vc4->clear_stencil;
        }

        if (!(vc4_debug & VC4_DEBUG_NORAST)) {
                int ret;

#ifndef USE_VC4_SIMULATOR
                ret = drmIoctl(vc4->fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
#else
                ret = vc4_simulator_flush(vc4, &submit);
#endif
                static bool warned = false;
                if (ret && !warned) {
                        fprintf(stderr, "Draw call returned %s.  "
                                        "Expect corruption.\n", strerror(errno));
                        warned = true;
                }
        }

        vc4->last_emit_seqno = submit.seqno;

        if (vc4_debug & VC4_DEBUG_ALWAYS_SYNC) {
                if (!vc4_wait_seqno(vc4->screen, vc4->last_emit_seqno,
                                    PIPE_TIMEOUT_INFINITE, "sync")) {
                        fprintf(stderr, "Wait failed.\n");
                        abort();
                }
        }

        vc4_job_reset(vc4);
}
Example #12
/**
 * Submits the job to the kernel and then reinitializes it.
 */
void
vc4_job_submit(struct vc4_context *vc4)
{
        if (vc4_debug & VC4_DEBUG_CL) {
                fprintf(stderr, "BCL:\n");
                vc4_dump_cl(vc4->bcl.base, cl_offset(&vc4->bcl), false);
        }

        struct drm_vc4_submit_cl submit;
        memset(&submit, 0, sizeof(submit));

        cl_ensure_space(&vc4->bo_handles, 4 * sizeof(uint32_t));
        cl_ensure_space(&vc4->bo_pointers, 4 * sizeof(struct vc4_bo *));

        vc4_submit_setup_rcl_surface(vc4, &submit.color_read,
                                     vc4->color_read, false, false);
        vc4_submit_setup_ms_rcl_surface(vc4, &submit.color_ms_write,
                                        vc4->color_write);
        vc4_submit_setup_rcl_surface(vc4, &submit.zs_read,
                                     vc4->zs_read, true, false);
        vc4_submit_setup_rcl_surface(vc4, &submit.zs_write,
                                     vc4->zs_write, true, true);

        submit.bo_handles = (uintptr_t)vc4->bo_handles.base;
        submit.bo_handle_count = cl_offset(&vc4->bo_handles) / 4;
        submit.bin_cl = (uintptr_t)vc4->bcl.base;
        submit.bin_cl_size = cl_offset(&vc4->bcl);
        submit.shader_rec = (uintptr_t)vc4->shader_rec.base;
        submit.shader_rec_size = cl_offset(&vc4->shader_rec);
        submit.shader_rec_count = vc4->shader_rec_count;
        submit.uniforms = (uintptr_t)vc4->uniforms.base;
        submit.uniforms_size = cl_offset(&vc4->uniforms);

        assert(vc4->draw_min_x != ~0 && vc4->draw_min_y != ~0);
        submit.min_x_tile = vc4->draw_min_x / 64;
        submit.min_y_tile = vc4->draw_min_y / 64;
        submit.max_x_tile = (vc4->draw_max_x - 1) / 64;
        submit.max_y_tile = (vc4->draw_max_y - 1) / 64;
        submit.width = vc4->draw_width;
        submit.height = vc4->draw_height;
        if (vc4->cleared) {
                submit.flags |= VC4_SUBMIT_CL_USE_CLEAR_COLOR;
                submit.clear_color[0] = vc4->clear_color[0];
                submit.clear_color[1] = vc4->clear_color[1];
                submit.clear_z = vc4->clear_depth;
                submit.clear_s = vc4->clear_stencil;
        }

        if (!(vc4_debug & VC4_DEBUG_NORAST)) {
                int ret;

#ifndef USE_VC4_SIMULATOR
                ret = drmIoctl(vc4->fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
#else
                ret = vc4_simulator_flush(vc4, &submit);
#endif
                if (ret) {
                        fprintf(stderr, "VC4 submit failed\n");
                        abort();
                }
        }

        vc4->last_emit_seqno = submit.seqno;

        if (vc4_debug & VC4_DEBUG_ALWAYS_SYNC) {
                if (!vc4_wait_seqno(vc4->screen, vc4->last_emit_seqno,
                                    PIPE_TIMEOUT_INFINITE, "sync")) {
                        fprintf(stderr, "Wait failed.\n");
                        abort();
                }
        }

        vc4_job_reset(vc4);
}
Example #13
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
	struct vvp_io       *vio     = cl2vvp_io(env, ios);
	struct cl_io        *io      = ios->cis_io;
	struct cl_object    *obj     = io->ci_obj;
	struct inode        *inode   = ccc_object_inode(obj);
	struct cl_fault_io  *fio     = &io->u.ci_fault;
	struct vvp_fault_io *cfio    = &vio->u.fault;
	loff_t               offset;
	int                  result  = 0;
	struct page          *vmpage  = NULL;
	struct cl_page      *page;
	loff_t               size;
	pgoff_t              last; /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
        /* Though we have already held a cl_lock upon this page, it can
         * still be truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop() and ll_fault() will catch
                 * and retry. */
                GOTO(out, result = +1);
        }


	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from above;
		 * we want to make sure that we complete the mkwrite action
		 * while holding this lock. We need to make sure that we are
		 * not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
				"llite: mkwrite and truncate race happened: "
				"%p: 0x%lx 0x%lx\n",
				vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are
			 * past the end of the file. This will propagate
			 * up the call stack to ll_page_mkwrite where
			 * we will return VM_FAULT_NOPAGE. Any non-negative
			 * value returned here will be silently
			 * converted to 0. If the vmpage->mapping is null
			 * the error code would be converted back to ENODATA
			 * in ll_page_mkwrite0. Thus we return -ENODATA
			 * to handle both cases
			 */
			GOTO(out, result = -ENODATA);
		}
	}

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if page is going to be written, we should add this page into cache
         * earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have a chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				GOTO(out, result);
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of
	 * a mkwrite action. We need to check
	 * our assertions are correct, since
	 * we should have caught this above
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /*
                 * Last page is mapped partially.
                 */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;
        EXIT;

out:
	/* return unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}
Example #14
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
					  const struct cl_object *obj,
					  struct cl_lock *lock)
{
	int result = 0;
	int i;
	int nr;
	u64 start;
	u64 end;
	u64 file_start;
	u64 file_end;

	struct lov_object       *loo    = cl2lov(obj);
	struct lov_layout_raid0 *r0     = lov_r0(loo);
	struct lov_lock		*lovlck;

	file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
	file_end   = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;

	for (i = 0, nr = 0; i < r0->lo_nr; i++) {
		/*
		 * XXX for wide striping a smarter algorithm is desirable,
		 * breaking out of the loop early.
		 */
		if (likely(r0->lo_sub[i]) && /* spare layout */
		    lov_stripe_intersects(loo->lo_lsm, i,
					  file_start, file_end, &start, &end))
			nr++;
	}
	LASSERT(nr > 0);
	lovlck = libcfs_kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
				 GFP_NOFS);
	if (!lovlck)
		return ERR_PTR(-ENOMEM);

	lovlck->lls_nr = nr;
	for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
		if (likely(r0->lo_sub[i]) &&
		    lov_stripe_intersects(loo->lo_lsm, i,
					  file_start, file_end, &start, &end)) {
			struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
			struct cl_lock_descr *descr;

			descr = &lls->sub_lock.cll_descr;

			LASSERT(!descr->cld_obj);
			descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
			descr->cld_start = cl_index(descr->cld_obj, start);
			descr->cld_end   = cl_index(descr->cld_obj, end);
			descr->cld_mode  = lock->cll_descr.cld_mode;
			descr->cld_gid   = lock->cll_descr.cld_gid;
			descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
			lls->sub_stripe = i;

			/* initialize sub lock */
			result = lov_sublock_init(env, lock, lls);
			if (result < 0)
				break;

			lls->sub_initialized = 1;
			nr++;
		}
	}
	LASSERT(ergo(result == 0, nr == lovlck->lls_nr));

	if (result != 0) {
		for (i = 0; i < nr; ++i) {
			if (!lovlck->lls_sub[i].sub_initialized)
				break;

			cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
		}
		kvfree(lovlck);
		lovlck = ERR_PTR(result);
	}

	return lovlck;
}