Example 1
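tegra_dc_ext_flip() sanitizes the flip request, pins the Y (and optional U/V) buffers of every window with a valid index, locks the windows, reserves a syncpoint increment per window (handing one back to the caller to wait on), and queues the flip work on that window's workqueue. On any failure it unwinds by unpinning whatever was already pinned.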
static int tegra_dc_ext_flip(struct tegra_dc_ext_user *user,
			     struct tegra_dc_ext_flip *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc_ext_flip_data *data;
	int work_index = -1;
	int i, ret = 0;

	if (!user->nvmap)
		return -EFAULT;

	ret = sanitize_flip_args(user, args);
	if (ret)
		return ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_WORK(&data->work, tegra_dc_ext_flip_worker);
	data->ext = ext;

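	/*
	 * Pin the Y plane (and the optional U/V planes) of every window with
	 * a valid index so the buffers stay resident until the queued flip
	 * worker has consumed them.
	 */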
	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = args->win[i].index;

		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));

		if (index < 0)
			continue;

		ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id,
					      &flip_win->handle[TEGRA_DC_Y],
					      &flip_win->phys_addr);
		if (ret)
			goto fail_pin;

		if (flip_win->attr.buff_id_u) {
			ret = tegra_dc_ext_pin_window(user,
					      flip_win->attr.buff_id_u,
					      &flip_win->handle[TEGRA_DC_U],
					      &flip_win->phys_addr_u);
			if (ret)
				goto fail_pin;
		} else {
			flip_win->handle[TEGRA_DC_U] = NULL;
			flip_win->phys_addr_u = 0;
		}

		if (flip_win->attr.buff_id_v) {
			ret = tegra_dc_ext_pin_window(user,
					      flip_win->attr.buff_id_v,
					      &flip_win->handle[TEGRA_DC_V],
					      &flip_win->phys_addr_v);
			if (ret)
				goto fail_pin;
		} else {
			flip_win->handle[TEGRA_DC_V] = NULL;
			flip_win->phys_addr_v = 0;
		}
	}

	ret = lock_windows_for_flip(user, args);
	if (ret)
		goto fail_pin;

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

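	/*
	 * Reserve a syncpoint increment for each window being flipped; one of
	 * them is reported back to user space (see the comment below).
	 */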
	for (i = 0; i < DC_N_WINDOWS; i++) {
		u32 syncpt_max;
		int index = args->win[i].index;

		if (index < 0)
			continue;

		syncpt_max = tegra_dc_incr_syncpt_max(ext->dc, index);

		data->win[i].syncpt_max = syncpt_max;

		/*
		 * Any of these windows' syncpoints should be equivalent for
		 * the client, so we just send back an arbitrary one of them
		 */
		args->post_syncpt_val = syncpt_max;
		args->post_syncpt_id = tegra_dc_get_syncpt_id(ext->dc, index);
		work_index = index;
	}
	/* Bail out if no window actually requested a flip. */
	if (work_index < 0) {
		ret = -EINVAL;
		goto unlock;
	}

	queue_work(ext->win[work_index].flip_wq, &data->work);

	unlock_windows_for_flip(user, args);

	return 0;

unlock:
	unlock_windows_for_flip(user, args);

fail_pin:
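	/* Unpin and free any plane buffers pinned before the failure. */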
	for (i = 0; i < DC_N_WINDOWS; i++) {
		int j;
		for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
			if (!data->win[i].handle[j])
				continue;

			nvmap_unpin(ext->nvmap, data->win[i].handle[j]);
			nvmap_free(ext->nvmap, data->win[i].handle[j]);
		}
	}
	kfree(data);

	return ret;
}
Example 2
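tegra_dc_ext_set_cursor_image() pins the new cursor buffer through nvmap, programs the cursor registers under dc->lock, kicks a state-control update, and only then unpins and frees the previously installed handle.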
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct nvmap_handle_ref *handle, *old_handle;
	dma_addr_t phys_addr;
	u32 size;
	int ret;

	if (!user->nvmap)
		return -EFAULT;

	size = args->flags & (TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 |
			      TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64);

	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
		return -EINVAL;

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

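	/*
	 * Keep the old handle so it can be unpinned once the new image has
	 * been programmed.
	 */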
	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	mutex_lock(&dc->lock);

	set_cursor_image_hw(dc, args, phys_addr);

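	/* Kick a state-control update so the new cursor settings take effect. */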
	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	/* XXX sync here? */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	if (old_handle) {
		nvmap_unpin(ext->nvmap, old_handle);
		nvmap_free(ext->nvmap, old_handle);
	}

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
Example 3
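What appears to be a later revision of the same handler: cursor-size validation becomes chip specific (128x128 and 256x256 cursors are accepted outside Tegra2/Tegra3), RGBA cursor formats are rejected on Tegra2/Tegra3, and the register writes are bracketed with tegra_dc_io_start()/tegra_dc_hold_dc_out().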
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct nvmap_handle_ref *handle, *old_handle;
	dma_addr_t phys_addr;
	u32 size;
	int ret;

	if (!user->nvmap)
		return -EFAULT;

	size = TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE(args->flags);
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
		return -EINVAL;
#else
	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64 &&
	    size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_128x128 &&
	    size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_256x256)
		return -EINVAL;
#endif

#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
	/* RGBA cursor formats are not supported on Tegra2/Tegra3. */
	if (args->flags & TEGRA_DC_EXT_CURSOR_FLAGS_RGBA_NORMAL)
		return -EINVAL;
#endif

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	set_cursor_image_hw(dc, args, phys_addr);

	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	/* XXX sync here? */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	if (old_handle) {
		nvmap_unpin(ext->nvmap, old_handle);
		nvmap_free(ext->nvmap, old_handle);
	}

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
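A further variant of the function follows immediately, presumably from a still later driver revision: buffer management switches from nvmap to dma-buf (struct tegra_dc_dmabuf), the size and format flags are mapped onto driver enums up front, and the hardware programming is delegated to tegra_dc_cursor_image().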
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct tegra_dc_dmabuf *handle, *old_handle;
	dma_addr_t phys_addr;
	int ret;
	u32 extformat = TEGRA_DC_EXT_CURSOR_FORMAT_FLAGS(args->flags);
	u32 fg = CURSOR_COLOR(args->foreground.r,
			      args->foreground.g,
			      args->foreground.b);
	u32 bg = CURSOR_COLOR(args->background.r,
			      args->background.g,
			      args->background.b);
	unsigned extsize = TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE(args->flags);
	enum tegra_dc_cursor_size size;
	enum tegra_dc_cursor_format format;

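	/* Translate the ioctl flag encodings into the driver's cursor enums. */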
	switch (extsize) {
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32:
		size = TEGRA_DC_CURSOR_SIZE_32X32;
		break;
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64:
		size = TEGRA_DC_CURSOR_SIZE_64X64;
		break;
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_128x128:
		size = TEGRA_DC_CURSOR_SIZE_128X128;
		break;
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_256x256:
		size = TEGRA_DC_CURSOR_SIZE_256X256;
		break;
	default:
		return -EINVAL;
	}

	switch (extformat) {
	case TEGRA_DC_EXT_CURSOR_FORMAT_2BIT_LEGACY:
		format = TEGRA_DC_CURSOR_FORMAT_2BIT_LEGACY;
		break;
	case TEGRA_DC_EXT_CURSOR_FORMAT_RGBA_NON_PREMULT_ALPHA:
		format = TEGRA_DC_CURSOR_FORMAT_RGBA_NON_PREMULT_ALPHA;
		break;
	case TEGRA_DC_EXT_CURSOR_FORMAT_RGBA_PREMULT_ALPHA:
		format = TEGRA_DC_CURSOR_FORMAT_RGBA_PREMULT_ALPHA;
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	ret = tegra_dc_cursor_image(dc, format, size, fg, bg, phys_addr);

	mutex_unlock(&ext->cursor.lock);

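	/* Release the dma-buf backing the previous cursor image. */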
	if (old_handle) {
		dma_buf_unmap_attachment(old_handle->attach,
			old_handle->sgt, DMA_TO_DEVICE);
		dma_buf_detach(old_handle->buf, old_handle->attach);
		dma_buf_put(old_handle->buf);
		kfree(old_handle);
	}

	return ret;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}