int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
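For orientation, here is a minimal sketch of a caller for the two-argument API above, roughly what a move helper of the same kernel era does before migrating a buffer out of system memory. The helper name example_bind_on_move is hypothetical, and the bo->ttm / mem_type fields are assumed to follow the usual struct ttm_buffer_object and struct ttm_mem_reg layout of that era.

/* Hypothetical caller, for illustration only: bind bo->ttm to its new
 * placement unless the destination is plain system memory. */
static int example_bind_on_move(struct ttm_buffer_object *bo,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	if (new_mem->mem_type == TTM_PL_SYSTEM)
		return 0;	/* nothing to bind for a system placement */

	ret = ttm_tt_bind(ttm, new_mem);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}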
Example #2
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
Example #3
static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
    int i, j;
    struct page *cur_page;
    int ret;

    if (ttm->caching_state == c_state)
        return 0;

    if (c_state != tt_cached) {
        ret = ttm_tt_populate(ttm);
        if (unlikely(ret != 0))
            return ret;
    }

    if (ttm->caching_state == tt_cached)
        drm_clflush_pages(ttm->pages, ttm->num_pages);

    for (i = 0; i < ttm->num_pages; ++i) {
        cur_page = ttm->pages[i];
        if (likely(cur_page != NULL)) {
            ret = ttm_tt_set_page_caching(cur_page,
                                          ttm->caching_state,
                                          c_state);
            if (unlikely(ret != 0))
                goto out_err;
        }
    }

    ttm->caching_state = c_state;

    return 0;

out_err:
    for (j = 0; j < i; ++j) {
        cur_page = ttm->pages[j];
        if (likely(cur_page != NULL)) {
            (void)ttm_tt_set_page_caching(cur_page, c_state,
                                          ttm->caching_state);
        }
    }

    return ret;
}
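ttm_tt_set_caching() is static; in the TTM code of this era it is normally reached through a small wrapper that maps placement flags onto a caching state. The sketch below reconstructs that wrapper from memory, so treat the TTM_PL_FLAG_WC / TTM_PL_FLAG_UNCACHED flags and the tt_wc / tt_uncached / tt_cached states as assumptions tied to that version of the API.

/* Sketch of the usual entry point: translate TTM placement flags into
 * the caching state that ttm_tt_set_caching() understands. */
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}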
Example #4
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
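This variant threads a struct ttm_operation_ctx through to ttm_tt_populate(). A minimal caller sketch follows; the helper name example_bind_with_ctx is hypothetical and the field values are arbitrary illustrations, assuming the interruptible / no_wait_gpu members of the context structure from this kernel generation.

/* Illustrative caller for the ctx-based variant above. */
static int example_bind_with_ctx(struct ttm_tt *ttm,
				 struct ttm_mem_reg *bo_mem)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,	/* do not let signals abort the populate */
		.no_wait_gpu = false	/* waiting for the GPU is allowed */
	};

	return ttm_tt_bind(ttm, bo_mem, &ctx);
}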
Example #5
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	/* Bypass ttm_tt_populate when TTM_PL_FLAG_VED is enabled */
	if (!(bo_mem->placement & TTM_PL_FLAG_VED)) {
		ret = ttm_tt_populate(ttm);
		if (ret)
			return ret;
	} else {
		/*
		 * TTM_PL_FLAG_VED indicates gralloc buffer allocation. As such,
		 * TTM allocation and BE bind should be bypassed.
		 * ttm->state remains unpopulated.
		 */
		return 0;
	}

	ret = be->func->bind(be, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
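All of the backend-based variants above move ttm->state to tt_bound on success. For completeness, here is a sketch of the symmetric teardown path from the same era; the be->func->unbind hook and the tt_unbound state are assumptions made by symmetry with the bind hook used in Examples 1, 2 and 5.

/* Sketch of the matching teardown for the backend-based variants. */
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->be->func->unbind(ttm->be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}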