Example #1
void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
#if TEGRA30_USE_SMMU
	struct tegra_smmu_data *ptsd;
#endif

	substream = pcm->streams[stream].substream;
	if (!substream)
		return;

	buf = &substream->dma_buffer;
	if (!buf->area)
		return;

#if TEGRA30_USE_SMMU
	if (!buf->private_data)
		return;
	ptsd = (struct tegra_smmu_data *)buf->private_data;
	nvmap_unpin(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
	nvmap_munmap(ptsd->pcm_nvmap_handle, buf->area);
	nvmap_free(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
	kfree(ptsd);
	buf->private_data = NULL;
#else
	dma_free_writecombine(pcm->card->dev, buf->bytes,
				buf->area, buf->addr);
#endif
	buf->area = NULL;
}
Example #2
static int tegra_overlay_flip(struct tegra_overlay_info *overlay,
			      struct tegra_overlay_flip_args *args,
			      struct nvmap_client *user_nvmap)
{
	struct tegra_overlay_flip_data *data;
	struct tegra_overlay_flip_win *flip_win;
	u32 syncpt_max;
	int i, err;

	if (WARN_ON(!overlay->ndev))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL) {
		dev_err(&overlay->ndev->dev,
			"can't allocate memory for flip\n");
		return -ENOMEM;
	}

	INIT_WORK(&data->work, tegra_overlay_flip_worker);
	data->overlay = overlay;

	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
		flip_win = &data->win[i];

		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));

		if (flip_win->attr.index == -1)
			continue;

		err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap);
		if (err < 0) {
			dev_err(&overlay->ndev->dev,
				"error setting window attributes\n");
			goto surf_err;
		}
	}

	syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc);
	data->syncpt_max = syncpt_max;

	queue_work(overlay->flip_wq, &data->work);

	args->post_syncpt_val = syncpt_max;
	args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc);

	return 0;

surf_err:
	while (i--) {
		if (data->win[i].handle) {
			nvmap_unpin(overlay->overlay_nvmap,
				    data->win[i].handle);
			nvmap_free(overlay->overlay_nvmap,
				   data->win[i].handle);
		}
	}
	kfree(data);
	return err;
}
Example #3
void nvhost_nvmap_unpin(struct mem_mgr *mgr,
		struct mem_handle *handle, struct sg_table *sgt)
{
	/* Free the caller's sg_table, then drop the underlying nvmap pin. */
	kfree(sgt);
	nvmap_unpin((struct nvmap_client *)mgr,
			(struct nvmap_handle_ref *)handle);
}
Example #4
static void ctx3d_free(struct kref *ref)
{
	struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
	/* This example uses a different nvmap interface variant: nvmap_unpin()
	 * takes a pointer to the handle and a count rather than a
	 * client/handle-ref pair. */
	nvmap_unpin(&ctx->restore, 1);
	nvmap_free(ctx->restore, ctx->save_cpu_data);
	kfree(ctx);
}
Example #5
static void tegra_overlay_flip_worker(struct work_struct *work)
{
	struct tegra_overlay_flip_data *data =
		container_of(work, struct tegra_overlay_flip_data, work);
	struct tegra_overlay_info *overlay = data->overlay;
	struct tegra_dc_win *win;
	struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
	struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
	int i, nr_win = 0, nr_unpin = 0;

	data = container_of(work, struct tegra_overlay_flip_data, work);

	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
		struct tegra_overlay_flip_win *flip_win = &data->win[i];
		int idx = flip_win->attr.index;

		if (idx == -1)
			continue;

		win = tegra_dc_get_window(overlay->dc, idx);

		if (!win)
			continue;

		if (win->flags && win->cur_handle)
			unpin_handles[nr_unpin++] = win->cur_handle;

		tegra_overlay_set_windowattr(overlay, win, &data->win[i]);

		wins[nr_win++] = win;

#if 0
		if (flip_win->attr.pre_syncpt_id < 0)
			continue;
		printk("%08x %08x\n",
		       flip_win->attr.pre_syncpt_id,
		       flip_win->attr.pre_syncpt_val);

		nvhost_syncpt_wait_timeout(&overlay->ndev->host->syncpt,
					   flip_win->attr.pre_syncpt_id,
					   flip_win->attr.pre_syncpt_val,
					   msecs_to_jiffies(500));
#endif
	}

	tegra_dc_update_windows(wins, nr_win);
	/* TODO: implement swapinterval here */
	tegra_dc_sync_windows(wins, nr_win);

	tegra_dc_incr_syncpt_min(overlay->dc, data->syncpt_max);

	/* unpin and deref previous front buffers */
	for (i = 0; i < nr_unpin; i++) {
		nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
		nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
	}

	kfree(data);
}
Example #6
static void tegra_overlay_n_shot(struct tegra_overlay_flip_data *data,
			struct nvmap_handle_ref **unpin_handles, int *nr_unpin)
{
	int i;
	struct tegra_overlay_info *overlay = data->overlay;
	u32 didim_delay = overlay->dc->out->sd_settings->hw_update_delay;
	u32 didim_enable = overlay->dc->out->sd_settings->enable;

	mutex_lock(&overlay->lock);

	if (data->didim_work) {
		/* Increment sync point if we finish n shot;
		 * otherwise send overlay flip request. */
		if (overlay->n_shot)
			overlay->n_shot--;

		if (overlay->n_shot && didim_enable) {
			tegra_overlay_flip_didim(data);
			mutex_unlock(&overlay->lock);
			return;
		} else {
			*nr_unpin = data->nr_unpin;
			for (i = 0; i < *nr_unpin; i++)
				unpin_handles[i] = data->unpin_handles[i];
			tegra_dc_incr_syncpt_min(overlay->dc, 0,
						data->syncpt_max);
		}
	} else {
		overlay->overlay_ref--;
		/* If no new flip request in the queue, we will send
		 * the last frame n times for DIDIM */
		if (!overlay->overlay_ref && didim_enable)
			overlay->n_shot = TEGRA_DC_DIDIM_MIN_SHOT + didim_delay;

		if (overlay->n_shot && didim_enable) {
			data->nr_unpin = *nr_unpin;
			data->didim_work = true;
			for (i = 0; i < *nr_unpin; i++)
				data->unpin_handles[i] = unpin_handles[i];
			tegra_overlay_flip_didim(data);
			mutex_unlock(&overlay->lock);
			return;
		} else {
			tegra_dc_incr_syncpt_min(overlay->dc, 0,
						data->syncpt_max);
		}
	}

	mutex_unlock(&overlay->lock);

	/* unpin and deref previous front buffers */
	for (i = 0; i < *nr_unpin; i++) {
		nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
		nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
	}

	kfree(data);
}
Example #7
void nvhost_3dctx_free(struct kref *ref)
{
	struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
	struct nvmap_client *nvmap = ctx->channel->dev->nvmap;

	if (ctx->restore_virt) {
		nvmap_munmap(ctx->restore, ctx->restore_virt);
		ctx->restore_virt = NULL;
	}
	nvmap_unpin(nvmap, ctx->restore);
	ctx->restore_phys = 0;
	nvmap_free(nvmap, ctx->restore);
	ctx->restore = NULL;
	kfree(ctx);
}
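The teardown sequence in the example above recurs throughout this listing: unmap the CPU mapping if one exists, unpin the handle, then free it. Below is a condensed sketch of that lifecycle, using only the calls already shown in these examples; the helper name and parameters are illustrative, and it assumes the nvmap client/handle types declared by the Tegra nvmap headers.

static void example_release_nvmap_buffer(struct nvmap_client *client,
					 struct nvmap_handle_ref *handle,
					 void *cpu_addr)
{
	/* Undo the CPU mapping obtained earlier, if any. */
	if (cpu_addr)
		nvmap_munmap(handle, cpu_addr);
	/* Release the device (IOVA) pin, then drop the handle reference. */
	nvmap_unpin(client, handle);
	nvmap_free(client, handle);
}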
Example #8
static int tegra_dc_ext_flip(struct tegra_dc_ext_user *user,
			     struct tegra_dc_ext_flip *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc_ext_flip_data *data;
	int work_index;
	int i, ret = 0;

	if (!user->nvmap)
		return -EFAULT;

	ret = sanitize_flip_args(user, args);
	if (ret)
		return ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_WORK(&data->work, tegra_dc_ext_flip_worker);
	data->ext = ext;

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = args->win[i].index;

		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));

		if (index < 0)
			continue;

		ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id,
					      &flip_win->handle[TEGRA_DC_Y],
					      &flip_win->phys_addr);
		if (ret)
			goto fail_pin;

		if (flip_win->attr.buff_id_u) {
			ret = tegra_dc_ext_pin_window(user,
					      flip_win->attr.buff_id_u,
					      &flip_win->handle[TEGRA_DC_U],
					      &flip_win->phys_addr_u);
			if (ret)
				goto fail_pin;
		} else {
			flip_win->handle[TEGRA_DC_U] = NULL;
			flip_win->phys_addr_u = 0;
		}

		if (flip_win->attr.buff_id_v) {
			ret = tegra_dc_ext_pin_window(user,
					      flip_win->attr.buff_id_v,
					      &flip_win->handle[TEGRA_DC_V],
					      &flip_win->phys_addr_v);
			if (ret)
				goto fail_pin;
		} else {
			flip_win->handle[TEGRA_DC_V] = NULL;
			flip_win->phys_addr_v = 0;
		}
	}

	ret = lock_windows_for_flip(user, args);
	if (ret)
		goto fail_pin;

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	for (i = 0; i < DC_N_WINDOWS; i++) {
		u32 syncpt_max;
		int index = args->win[i].index;

		if (index < 0)
			continue;

		syncpt_max = tegra_dc_incr_syncpt_max(ext->dc, index);

		data->win[i].syncpt_max = syncpt_max;

		/*
		 * Any of these windows' syncpoints should be equivalent for
		 * the client, so we just send back an arbitrary one of them
		 */
		args->post_syncpt_val = syncpt_max;
		args->post_syncpt_id = tegra_dc_get_syncpt_id(ext->dc, index);
		work_index = index;
	}
	queue_work(ext->win[work_index].flip_wq, &data->work);

	unlock_windows_for_flip(user, args);

	return 0;

unlock:
	unlock_windows_for_flip(user, args);

fail_pin:
	for (i = 0; i < DC_N_WINDOWS; i++) {
		int j;
		for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
			if (!data->win[i].handle[j])
				continue;

			nvmap_unpin(ext->nvmap, data->win[i].handle[j]);
			nvmap_free(ext->nvmap, data->win[i].handle[j]);
		}
	}
	kfree(data);

	return ret;
}
Example #9
static void tegra_dc_ext_flip_worker(struct work_struct *work)
{
	struct tegra_dc_ext_flip_data *data =
		container_of(work, struct tegra_dc_ext_flip_data, work);
	struct tegra_dc_ext *ext = data->ext;
	struct tegra_dc_win *wins[DC_N_WINDOWS];
	struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS *
					       TEGRA_DC_NUM_PLANES];
	int i, nr_unpin = 0, nr_win = 0;

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;
		struct tegra_dc_win *win;
		struct tegra_dc_ext_win *ext_win;

		if (index < 0)
			continue;

		win = tegra_dc_get_window(ext->dc, index);
		ext_win = &ext->win[index];

		if (win->flags & TEGRA_WIN_FLAG_ENABLED) {
			int j;
			for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
				if (!ext_win->cur_handle[j])
					continue;

				unpin_handles[nr_unpin++] =
					ext_win->cur_handle[j];
			}
		}

		tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);

		wins[nr_win++] = win;
	}

	tegra_dc_update_windows(wins, nr_win);
	/* TODO: implement swapinterval here */
	tegra_dc_sync_windows(wins, nr_win);

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;

		if (index < 0)
			continue;

		tegra_dc_incr_syncpt_min(ext->dc, index,
			flip_win->syncpt_max);
	}

	/* unpin and deref previous front buffers */
	for (i = 0; i < nr_unpin; i++) {
		nvmap_unpin(ext->nvmap, unpin_handles[i]);
		nvmap_free(ext->nvmap, unpin_handles[i]);
	}

	kfree(data);
}
Example #10
static void tegra_dc_ext_flip_worker(struct work_struct *work)
{
	struct tegra_dc_ext_flip_data *data =
		container_of(work, struct tegra_dc_ext_flip_data, work);
	struct tegra_dc_ext *ext = data->ext;
	struct tegra_dc_win *wins[DC_N_WINDOWS];
	struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS *
					       TEGRA_DC_NUM_PLANES];
	struct nvmap_handle_ref *old_handle;
	int i, nr_unpin = 0, nr_win = 0;
	bool skip_flip = false;

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;
		struct tegra_dc_win *win;
		struct tegra_dc_ext_win *ext_win;

		if (index < 0)
			continue;

		win = tegra_dc_get_window(ext->dc, index);
		ext_win = &ext->win[index];

		if (!(atomic_dec_and_test(&ext_win->nr_pending_flips)) &&
			(flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_CURSOR))
			skip_flip = true;

		if (skip_flip)
			old_handle = flip_win->handle[TEGRA_DC_Y];
		else
			old_handle = ext_win->cur_handle[TEGRA_DC_Y];

		if (old_handle) {
			int j;
			for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
				if (skip_flip)
					old_handle = flip_win->handle[j];
				else
					old_handle = ext_win->cur_handle[j];

				if (!old_handle)
					continue;

				unpin_handles[nr_unpin++] = old_handle;
			}
		}

		if (!skip_flip)
			tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);

		wins[nr_win++] = win;
	}

	if (!skip_flip) {
		tegra_dc_update_windows(wins, nr_win);
		/* TODO: implement swapinterval here */
		tegra_dc_sync_windows(wins, nr_win);
	}

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;

		if (index < 0)
			continue;

		tegra_dc_incr_syncpt_min(ext->dc, index,
			flip_win->syncpt_max);
	}

	/* unpin and deref previous front buffers */
	for (i = 0; i < nr_unpin; i++) {
		nvmap_unpin(ext->nvmap, unpin_handles[i]);
		nvmap_free(ext->nvmap, unpin_handles[i]);
	}

	kfree(data);
}
Example #11
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct nvmap_handle_ref *handle, *old_handle;
	dma_addr_t phys_addr;
	u32 size;
	int ret;

	if (!user->nvmap)
		return -EFAULT;

	size = TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE(args->flags);
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
		return -EINVAL;
#else
	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64 &&
	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_128x128 &&
	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_256x256)
		return -EINVAL;
#endif

#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
	/* RGBA cursor formats are not supported on Tegra2/Tegra3. */
	if (args->flags & TEGRA_DC_EXT_CURSOR_FLAGS_RGBA_NORMAL)
		return -EINVAL;
#endif

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	mutex_lock(&dc->lock);
	tegra_dc_io_start(dc);
	tegra_dc_hold_dc_out(dc);

	set_cursor_image_hw(dc, args, phys_addr);

	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	tegra_dc_release_dc_out(dc);
	tegra_dc_io_end(dc);
	/* XXX sync here? */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	if (old_handle) {
		nvmap_unpin(ext->nvmap, old_handle);
		nvmap_free(ext->nvmap, old_handle);
	}

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
Example #12
static int tegra_overlay_flip(struct tegra_overlay_info *overlay,
			      struct tegra_overlay_flip_args *args,
			      struct nvmap_client *user_nvmap)
{
	struct tegra_overlay_flip_data *data;
	struct tegra_overlay_flip_win *flip_win;
	u32 syncpt_max;
	int i, err;

	if (WARN_ON(!overlay->ndev))
		return -EFAULT;

	mutex_lock(&tegra_flip_lock);
	if (!overlay->dc->enabled) {
		mutex_unlock(&tegra_flip_lock);
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL) {
		dev_err(&overlay->ndev->dev,
			"can't allocate memory for flip\n");
		mutex_unlock(&tegra_flip_lock);
		return -ENOMEM;
	}

	INIT_WORK(&data->work, tegra_overlay_flip_worker);
	data->overlay = overlay;
	data->flags = args->flags;

	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
		flip_win = &data->win[i];

		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));

		if (flip_win->attr.index == -1)
			continue;

		err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap);
		if (err < 0) {
			dev_err(&overlay->ndev->dev,
				"error setting window attributes\n");
			goto surf_err;
		}
	}

	syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc);
	data->syncpt_max = syncpt_max;

	queue_work(overlay->flip_wq, &data->work);

	/*
	 * Before the queued flip_wq get scheduled, we set the EMC clock to the
	 * default value in order to do FLIP without glitch.
	 */
	tegra_dc_set_default_emc(overlay->dc);

	args->post_syncpt_val = syncpt_max;
	args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc);
	mutex_unlock(&tegra_flip_lock);

	return 0;

surf_err:
	while (i--) {
		if (data->win[i].handle) {
			nvmap_unpin(overlay->overlay_nvmap,
				    data->win[i].handle);
			nvmap_free(overlay->overlay_nvmap,
				   data->win[i].handle);
		}
	}
	kfree(data);
	mutex_unlock(&tegra_flip_lock);
	return err;
}
Example #13
static void tegra_overlay_flip_worker(struct work_struct *work)
{
	struct tegra_overlay_flip_data *data =
		container_of(work, struct tegra_overlay_flip_data, work);
	struct tegra_overlay_info *overlay = data->overlay;
	struct tegra_dc_win *win;
	struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
	struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
	int i, nr_win = 0, nr_unpin = 0;

	data = container_of(work, struct tegra_overlay_flip_data, work);

	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
		struct tegra_overlay_flip_win *flip_win = &data->win[i];
		int idx = flip_win->attr.index;

		if (idx == -1)
			continue;

		win = tegra_dc_get_window(overlay->dc, idx);

		if (!win)
			continue;

		if (win->flags && win->cur_handle && !data->didim_work)
			unpin_handles[nr_unpin++] = win->cur_handle;

		tegra_overlay_set_windowattr(overlay, win, &data->win[i]);

		wins[nr_win++] = win;

#if 0
		if (flip_win->attr.pre_syncpt_id < 0)
			continue;
		printk("%08x %08x\n",
		       flip_win->attr.pre_syncpt_id,
		       flip_win->attr.pre_syncpt_val);

		nvhost_syncpt_wait_timeout(&nvhost_get_host(overlay->ndev)->syncpt,
					   flip_win->attr.pre_syncpt_id,
					   flip_win->attr.pre_syncpt_val,
					   msecs_to_jiffies(500));
#endif
	}

	if (data->flags & TEGRA_OVERLAY_FLIP_FLAG_BLEND_REORDER) {
		struct tegra_dc_win *dcwins[DC_N_WINDOWS];

		for (i = 0; i < DC_N_WINDOWS; i++)
			dcwins[i] = tegra_dc_get_window(overlay->dc, i);

		tegra_overlay_blend_reorder(&overlay->blend, dcwins);
		tegra_dc_update_windows(dcwins, DC_N_WINDOWS);
		tegra_dc_sync_windows(dcwins, DC_N_WINDOWS);
	} else {
		tegra_dc_update_windows(wins, nr_win);
		/* TODO: implement swapinterval here */
		tegra_dc_sync_windows(wins, nr_win);
	}

	if ((overlay->dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) &&
		(overlay->dc->out->flags & TEGRA_DC_OUT_N_SHOT_MODE)) {
		tegra_overlay_n_shot(data, unpin_handles, &nr_unpin);
	} else {
		tegra_dc_incr_syncpt_min(overlay->dc, 0, data->syncpt_max);

		/* unpin and deref previous front buffers */
		for (i = 0; i < nr_unpin; i++) {
			nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
			nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
		}

		kfree(data);
	}
}
Example #14
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct nvmap_handle_ref *handle, *old_handle;
	dma_addr_t phys_addr;
	u32 size;
	int ret;

	if (!user->nvmap)
		return -EFAULT;

	size = args->flags & (TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 |
			      TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64);

	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
		return -EINVAL;

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	mutex_lock(&dc->lock);

	set_cursor_image_hw(dc, args, phys_addr);

	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	/* XXX sync here? */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	if (old_handle) {
		nvmap_unpin(ext->nvmap, old_handle);
		nvmap_free(ext->nvmap, old_handle);
	}

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
Example #15
static void tegra_dc_ext_flip_worker(struct work_struct *work)
{
	struct tegra_dc_ext_flip_data *data =
		container_of(work, struct tegra_dc_ext_flip_data, work);
	struct tegra_dc_ext *ext = data->ext;
	struct tegra_dc_win *wins[DC_N_WINDOWS];
	struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS];
	int i, nr_unpin = 0, nr_win = 0, nr_disable = 0;

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;
		struct tegra_dc_win *win;
		struct tegra_dc_ext_win *ext_win;
		bool old_ena, new_ena;

		if (index < 0)
			continue;

		win = tegra_dc_get_window(ext->dc, index);
		ext_win = &ext->win[index];

		old_ena = ext->win[index].enabled;
		new_ena = flip_win->handle != NULL;
		if (old_ena != new_ena) {
			if (new_ena)
				process_window_change(ext, 1);
			else
				nr_disable++;
		}
		ext->win[index].enabled = new_ena;

		if (old_ena && ext_win->cur_handle)
			unpin_handles[nr_unpin++] = ext_win->cur_handle;

		tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);

		wins[nr_win++] = win;
	}

	tegra_dc_update_windows(wins, nr_win);
	/* TODO: implement swapinterval here */
	tegra_dc_sync_windows(wins, nr_win);

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;

		if (index < 0)
			continue;

		tegra_dc_incr_syncpt_min(ext->dc, index,
			flip_win->syncpt_max);
	}

	/* unpin and deref previous front buffers */
	for (i = 0; i < nr_unpin; i++) {
		nvmap_unpin(ext->nvmap, unpin_handles[i]);
		nvmap_free(ext->nvmap, unpin_handles[i]);
	}

	if (nr_disable)
		process_window_change(ext, -nr_disable);

	kfree(data);
}
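A pattern shared by all of the flip workers in this listing: once the display controller has latched the new buffers (after tegra_dc_sync_windows() and the syncpoint increment), the handles of the previous front buffers are unpinned and freed. The sketch below condenses that step into a standalone helper for reference; the name and parameters are illustrative, not taken from any driver.

static void example_unpin_old_front_buffers(struct nvmap_client *client,
					    struct nvmap_handle_ref **handles,
					    int nr)
{
	int i;

	/* Unpin and drop the reference on each previous front buffer. */
	for (i = 0; i < nr; i++) {
		nvmap_unpin(client, handles[i]);
		nvmap_free(client, handles[i]);
	}
}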