Example #1
int omap_tiler_alloc(struct ion_heap *heap,
		     struct ion_client *client,
		     struct omap_ion_tiler_alloc_data *data)
{
	struct ion_handle *handle;
	struct ion_buffer *buffer;
	struct omap_tiler_info *info = NULL;
	u32 n_phys_pages;
	u32 n_tiler_pages;
	u32 tiler_start = 0;
	u32 v_size;
	tiler_blk_handle tiler_handle;
	int ret;

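	/* 1D (page mode) blocks must be described as a single row */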
	if (data->fmt == TILER_PIXEL_FMT_PAGE && data->h != 1) {
		pr_err("%s: Page mode (1D) allocations must have a height of one\n",
		       __func__);
		return -EINVAL;
	}

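	/* ask the tiler how many physical and virtual (tiler) pages this w/h/fmt needs */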
	ret = tiler_memsize(data->fmt, data->w, data->h,
			    &n_phys_pages,
			    &n_tiler_pages);

	if (ret) {
		pr_err("%s: invalid tiler request w %u h %u fmt %u\n", __func__,
		       data->w, data->h, data->fmt);
		return ret;
	}

	BUG_ON(!n_phys_pages || !n_tiler_pages);

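	/*
	 * Reserve tiler address space for the block. When non-page-aligned
	 * allocations are enabled and the caller passed a token, honor the
	 * requested alignment and offset.
	 */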
	if (TILER_ENABLE_NON_PAGE_ALIGNED_ALLOCATIONS && data->token != 0) {
		tiler_handle = tiler_alloc_block_area_aligned(data->fmt,
							      data->w, data->h,
							      &tiler_start,
							      NULL,
							      data->out_align,
							      data->offset,
							      data->token);
	} else {
		tiler_handle = tiler_alloc_block_area(data->fmt, data->w,
						      data->h, &tiler_start,
						      NULL);
	}

	if (IS_ERR_OR_NULL(tiler_handle)) {
		/* PTR_ERR(NULL) is 0; don't return success on failure */
		ret = tiler_handle ? PTR_ERR(tiler_handle) : -ENOMEM;
		pr_err("%s: failure to allocate address space from tiler\n",
		       __func__);
		goto err_nomem;
	}

	v_size = tiler_block_vsize(tiler_handle);

	if (!v_size) {
		/* ret still holds 0 here; set an error before bailing out */
		ret = -EINVAL;
		goto err_alloc;
	}

	n_tiler_pages = (PAGE_ALIGN(v_size) / PAGE_SIZE);

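	/*
	 * info, the physical-address array and the tiler-address array are
	 * carved out of one allocation; the pointers below index into it.
	 */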
	info = kzalloc(sizeof(struct omap_tiler_info) +
		       sizeof(u32) * n_phys_pages +
		       sizeof(u32) * n_tiler_pages, GFP_KERNEL);
	if (!info) {
		/* without this, a failed kzalloc() would return ret == 0 */
		ret = -ENOMEM;
		goto err_alloc;
	}

	info->tiler_handle = tiler_handle;
	info->tiler_start = tiler_start;
	info->n_phys_pages = n_phys_pages;
	info->n_tiler_pages = n_tiler_pages;
	info->phys_addrs = (u32 *)(info + 1);
	info->tiler_addrs = info->phys_addrs + n_phys_pages;
	info->fmt = data->fmt;

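	/* only the tiler heaps need backing memory allocated and pinned here */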
	if ((heap->id == OMAP_ION_HEAP_TILER) ||
	    (heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
		if (use_dynamic_pages)
			ret = omap_tiler_alloc_dynamicpages(info);
		else
			ret = omap_tiler_alloc_carveout(heap, info);

		if (ret)
			goto err_alloc;

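		/* map the backing pages into the reserved tiler address space */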
		ret = tiler_pin_block(info->tiler_handle, info->phys_addrs,
				      info->n_phys_pages);
		if (ret) {
			pr_err("%s: failure to pin pages to tiler\n",
				__func__);
			goto err_pin;
		}
	}
	data->stride = tiler_block_vstride(info->tiler_handle);

	/* create an ion handle for the allocation */
	handle = ion_alloc(client, 0, 0, 1 << heap->id);
	if (IS_ERR_OR_NULL(handle)) {
		/* PTR_ERR(NULL) is 0; don't return success on failure */
		ret = handle ? PTR_ERR(handle) : -ENOMEM;
		pr_err("%s: failure to allocate handle to manage tiler allocation\n",
		       __func__);
		goto err;
	}

	buffer = ion_handle_buffer(handle);
	buffer->heap = heap;	/* clarify tiler heap */
	buffer->size = v_size;
	buffer->priv_virt = info;
	data->handle = handle;
	data->offset = (size_t)(info->tiler_start & ~PAGE_MASK);

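	/* record the tiler-side page addresses; a failure here is only logged */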
	if (tiler_fill_virt_array(tiler_handle, info->tiler_addrs,
				  &n_tiler_pages) < 0) {
		pr_err("%s: failure filling tiler's virtual array %d\n",
		       __func__, n_tiler_pages);
	}

	return 0;

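	/* error unwind: each label falls through to the next */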
err:
	/* only the tiler heaps pinned pages above; guard the unpin */
	if ((heap->id == OMAP_ION_HEAP_TILER) ||
	    (heap->id == OMAP_ION_HEAP_NONSECURE_TILER))
		tiler_unpin_block(info->tiler_handle);
err_pin:
	if ((heap->id == OMAP_ION_HEAP_TILER) ||
	    (heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
		if (use_dynamic_pages)
			omap_tiler_free_dynamicpages(info);
		else
			omap_tiler_free_carveout(heap, info);
	}
err_alloc:
	tiler_free_block_area(tiler_handle);
err_nomem:
	kfree(info);
	return ret;
}
Example #2
int omap_tiler_alloc(struct ion_heap *heap,
		     struct ion_client *client,
		     struct omap_ion_tiler_alloc_data *data)
{
	struct ion_handle *handle;
	struct ion_buffer *buffer;
	struct omap_tiler_info *info = NULL;
	u32 n_phys_pages;
	u32 n_tiler_pages;
	u32 tiler_start = 0;
	u32 v_size;
	tiler_blk_handle tiler_handle;
	ion_phys_addr_t addr = 0;
	int i = 0, ret;

	if (data->fmt == TILER_PIXEL_FMT_PAGE && data->h != 1) {
		pr_err("%s: Page mode (1D) allocations must have a height of one\n",
		       __func__);
		return -EINVAL;
	}

	ret = tiler_memsize(data->fmt, data->w, data->h,
			    &n_phys_pages,
			    &n_tiler_pages);

	if (ret) {
		pr_err("%s: invalid tiler request w %u h %u fmt %u\n", __func__,
		       data->w, data->h, data->fmt);
		return ret;
	}

	BUG_ON(!n_phys_pages || !n_tiler_pages);

	if (TILER_ENABLE_NON_PAGE_ALIGNED_ALLOCATIONS && data->token != 0) {
		tiler_handle = tiler_alloc_block_area_aligned(data->fmt,
							      data->w, data->h,
							      &tiler_start,
							      NULL,
							      data->out_align,
							      data->offset,
							      data->token);
	} else {
		tiler_handle = tiler_alloc_block_area(data->fmt, data->w,
						      data->h, &tiler_start,
						      NULL);
	}

	if (IS_ERR_OR_NULL(tiler_handle)) {
		/* PTR_ERR(NULL) is 0; don't return success on failure */
		ret = tiler_handle ? PTR_ERR(tiler_handle) : -ENOMEM;
		pr_err("%s: failure to allocate address space from tiler\n",
		       __func__);
		goto err_nomem;
	}

	v_size = tiler_block_vsize(tiler_handle);

	if (!v_size) {
		/* ret still holds 0 here; set an error before bailing out */
		ret = -EINVAL;
		goto err_alloc;
	}

	n_tiler_pages = (PAGE_ALIGN(v_size) / PAGE_SIZE);

	info = kzalloc(sizeof(struct omap_tiler_info) +
		       sizeof(u32) * n_phys_pages +
		       sizeof(u32) * n_tiler_pages, GFP_KERNEL);
	if (!info) {
		/* without this, a failed kzalloc() would return ret == 0 */
		ret = -ENOMEM;
		goto err_alloc;
	}

	info->tiler_handle = tiler_handle;
	info->tiler_start = tiler_start;
	info->n_phys_pages = n_phys_pages;
	info->n_tiler_pages = n_tiler_pages;
	info->phys_addrs = (u32 *)(info + 1);
	info->tiler_addrs = info->phys_addrs + n_phys_pages;
	info->fmt = data->fmt;

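	/*
	 * Try one contiguous ("lump") carveout allocation first; if that
	 * fails, fall back to allocating the backing memory page by page.
	 */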
	addr = ion_carveout_allocate(heap, n_phys_pages*PAGE_SIZE, 0);
	if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
		for (i = 0; i < n_phys_pages; i++) {
			addr = ion_carveout_allocate(heap, PAGE_SIZE, 0);

			if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
				ret = -ENOMEM;
				pr_err("%s: failed to allocate pages to back "
					"tiler address space\n", __func__);
				goto err_alloc;
			}
			info->phys_addrs[i] = addr;
		}
	} else {
		info->lump = true;
		for (i = 0; i < n_phys_pages; i++)
			info->phys_addrs[i] = addr + i*PAGE_SIZE;
	}

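	/* pin the backing pages into the reserved tiler address space */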
	ret = tiler_pin_block(info->tiler_handle, info->phys_addrs,
			      info->n_phys_pages);
	if (ret) {
		pr_err("%s: failure to pin pages to tiler\n", __func__);
		goto err_alloc;
	}

	data->stride = tiler_block_vstride(info->tiler_handle);

	/* create an ion handle for the allocation */
	handle = ion_alloc(client, 0, 0, 1 << heap->id);
	if (IS_ERR_OR_NULL(handle)) {
		/* PTR_ERR(NULL) is 0; don't return success on failure */
		ret = handle ? PTR_ERR(handle) : -ENOMEM;
		pr_err("%s: failure to allocate handle to manage tiler allocation\n",
		       __func__);
		goto err;
	}

	buffer = ion_handle_buffer(handle);
	buffer->size = v_size;
	buffer->priv_virt = info;
	data->handle = handle;
	data->offset = (size_t)(info->tiler_start & ~PAGE_MASK);

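	/* record the tiler-side page addresses; a failure here is only logged */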
	if (tiler_fill_virt_array(tiler_handle, info->tiler_addrs,
				  &n_tiler_pages) < 0) {
		pr_err("%s: failure filling tiler's virtual array %d\n",
		       __func__, n_tiler_pages);
	}

	return 0;

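	/* error unwind: free in reverse order of allocation */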
err:
	tiler_unpin_block(info->tiler_handle);
err_alloc:
	/* use the local handle: info may still be NULL on this path */
	tiler_free_block_area(tiler_handle);
	if (info) {
		if (info->lump)
			ion_carveout_free(heap, addr,
					  n_phys_pages * PAGE_SIZE);
		else
			for (i -= 1; i >= 0; i--)
				ion_carveout_free(heap, info->phys_addrs[i],
						  PAGE_SIZE);
	}
err_nomem:
	kfree(info);
	return ret;
}