/*
 * Pin every window referenced by a userspace flip request and queue the
 * flip to the overlay workqueue.
 *
 * On success, ownership of @data passes to tegra_overlay_flip_worker(),
 * which unpins the previous front buffers and frees @data.  On error,
 * every handle pinned so far is unpinned/freed here and @data is freed.
 *
 * Returns 0 on success or a negative errno.
 */
static int tegra_overlay_flip(struct tegra_overlay_info *overlay,
                              struct tegra_overlay_flip_args *args,
                              struct nvmap_client *user_nvmap)
{
    struct tegra_overlay_flip_data *data;
    struct tegra_overlay_flip_win *flip_win;
    u32 syncpt_max;
    int i, err;

    if (WARN_ON(!overlay->ndev))
        return -EFAULT;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (data == NULL) {
        dev_err(&overlay->ndev->dev, "can't allocate memory for flip\n");
        return -ENOMEM;
    }

    INIT_WORK(&data->work, tegra_overlay_flip_worker);
    data->overlay = overlay;

    /* Copy per-window attributes and pin each window's nvmap buffer.
     * attr.index == -1 marks an unused window slot. */
    for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
        flip_win = &data->win[i];
        memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
        if (flip_win->attr.index == -1)
            continue;
        err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap);
        if (err < 0) {
            dev_err(&overlay->ndev->dev, "error setting window attributes\n");
            goto surf_err;
        }
    }

    /* Reserve the syncpoint value the worker will signal when the flip
     * completes, then hand the request off to the workqueue. */
    syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc);
    data->syncpt_max = syncpt_max;
    queue_work(overlay->flip_wq, &data->work);

    /* Report the syncpoint id/value the caller can wait on. */
    args->post_syncpt_val = syncpt_max;
    args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc);

    return 0;

surf_err:
    /* Unwind only the windows pinned before the failure (indices < i). */
    while (i--) {
        if (data->win[i].handle) {
            nvmap_unpin(overlay->overlay_nvmap, data->win[i].handle);
            nvmap_free(overlay->overlay_nvmap, data->win[i].handle);
        }
    }
    kfree(data);
    return err;
}
/*
 * Release the DMA buffer backing one direction (@stream) of a PCM device.
 *
 * With TEGRA30_USE_SMMU the buffer was allocated through nvmap (pinned,
 * mapped, private_data holds the tegra_smmu_data bookkeeping); otherwise
 * it came from dma_alloc_writecombine().  Each path undoes exactly its
 * own setup.  Safe to call when nothing was allocated (early returns).
 */
void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
    struct snd_pcm_substream *substream;
    struct snd_dma_buffer *buf;
#if TEGRA30_USE_SMMU
    struct tegra_smmu_data *ptsd;
#endif

    substream = pcm->streams[stream].substream;
    if (!substream)
        return;

    buf = &substream->dma_buffer;
    if (!buf->area)
        return;

#if TEGRA30_USE_SMMU
    if (!buf->private_data)
        return;
    ptsd = (struct tegra_smmu_data *)buf->private_data;
    /* Teardown mirrors allocation order: unpin, unmap CPU view, drop the
     * handle, then free the bookkeeping struct. */
    nvmap_unpin(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
    nvmap_munmap(ptsd->pcm_nvmap_handle, buf->area);
    nvmap_free(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
    kfree(ptsd);
    buf->private_data = NULL;
#else
    dma_free_writecombine(pcm->card->dev, buf->bytes, buf->area, buf->addr);
#endif
    /* Mark the buffer as gone so a second call is a no-op. */
    buf->area = NULL;
}
/*
 * kref release callback: tear down a 3D hardware context once the last
 * reference is dropped.
 *
 * NOTE(review): the nvmap call signatures here differ from the
 * (client, handle) pattern used elsewhere in this file —
 * nvmap_unpin(&ctx->restore, 1) looks like an older array+count API and
 * nvmap_free(ctx->restore, ctx->save_cpu_data) passes the restore handle
 * in the client position (ctx3d_deinit() does the same).  Presumably this
 * matches the nvmap API revision this version was written against —
 * confirm before changing.
 */
static void ctx3d_free(struct kref *ref)
{
    struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);

    nvmap_unpin(&ctx->restore, 1);
    nvmap_free(ctx->restore, ctx->save_cpu_data);
    kfree(ctx);
}
static void tegra_overlay_flip_worker(struct work_struct *work) { struct tegra_overlay_flip_data *data = container_of(work, struct tegra_overlay_flip_data, work); struct tegra_overlay_info *overlay = data->overlay; struct tegra_dc_win *win; struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS]; struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS]; int i, nr_win = 0, nr_unpin = 0; data = container_of(work, struct tegra_overlay_flip_data, work); for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) { struct tegra_overlay_flip_win *flip_win = &data->win[i]; int idx = flip_win->attr.index; if (idx == -1) continue; win = tegra_dc_get_window(overlay->dc, idx); if (!win) continue; if (win->flags && win->cur_handle) unpin_handles[nr_unpin++] = win->cur_handle; tegra_overlay_set_windowattr(overlay, win, &data->win[i]); wins[nr_win++] = win; #if 0 if (flip_win->attr.pre_syncpt_id < 0) continue; printk("%08x %08x\n", flip_win->attr.pre_syncpt_id, flip_win->attr.pre_syncpt_val); nvhost_syncpt_wait_timeout(&overlay->ndev->host->syncpt, flip_win->attr.pre_syncpt_id, flip_win->attr.pre_syncpt_val, msecs_to_jiffies(500)); #endif } tegra_dc_update_windows(wins, nr_win); /* TODO: implement swapinterval here */ tegra_dc_sync_windows(wins, nr_win); tegra_dc_incr_syncpt_min(overlay->dc, data->syncpt_max); /* unpin and deref previous front buffers */ for (i = 0; i < nr_unpin; i++) { nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]); nvmap_free(overlay->overlay_nvmap, unpin_handles[i]); } kfree(data); }
/*
 * DIDIM n-shot handling for one-shot display panels: the last frame must
 * be resent n times so smartdimmer hardware can converge.
 *
 * Two entry cases, distinguished by data->didim_work:
 *  - didim_work set: this is one of the repeat frames; either requeue
 *    another repeat (and keep ownership of @data) or, on the final shot,
 *    recover the stashed unpin list and signal the syncpoint.
 *  - didim_work clear: a fresh flip finished; if it was the last queued
 *    flip and DIDIM is enabled, stash the unpin list in @data and start
 *    the repeat sequence, otherwise just signal the syncpoint.
 *
 * When a repeat is queued via tegra_overlay_flip_didim(), @data stays
 * alive and this function returns early; otherwise the previous front
 * buffers are released and @data is freed here.
 */
static void tegra_overlay_n_shot(struct tegra_overlay_flip_data *data,
                                 struct nvmap_handle_ref **unpin_handles,
                                 int *nr_unpin)
{
    int i;
    struct tegra_overlay_info *overlay = data->overlay;
    u32 didim_delay = overlay->dc->out->sd_settings->hw_update_delay;
    u32 didim_enable = overlay->dc->out->sd_settings->enable;

    mutex_lock(&overlay->lock);
    if (data->didim_work) {
        /* Increment sync point if we finish n shot;
         * otherwise send overlay flip request. */
        if (overlay->n_shot)
            overlay->n_shot--;
        if (overlay->n_shot && didim_enable) {
            /* More repeats pending: requeue, keep @data. */
            tegra_overlay_flip_didim(data);
            mutex_unlock(&overlay->lock);
            return;
        } else {
            /* Final shot: restore the saved unpin list for release. */
            *nr_unpin = data->nr_unpin;
            for (i = 0; i < *nr_unpin; i++)
                unpin_handles[i] = data->unpin_handles[i];
            tegra_dc_incr_syncpt_min(overlay->dc, 0, data->syncpt_max);
        }
    } else {
        overlay->overlay_ref--;
        /* If no new flip request in the queue, we will send
         * the last frame n times for DIDIM */
        if (!overlay->overlay_ref && didim_enable)
            overlay->n_shot = TEGRA_DC_DIDIM_MIN_SHOT + didim_delay;
        if (overlay->n_shot && didim_enable) {
            /* Stash the unpin list inside @data; the buffers must stay
             * pinned while the repeated frames scan out. */
            data->nr_unpin = *nr_unpin;
            data->didim_work = true;
            for (i = 0; i < *nr_unpin; i++)
                data->unpin_handles[i] = unpin_handles[i];
            tegra_overlay_flip_didim(data);
            mutex_unlock(&overlay->lock);
            return;
        } else {
            tegra_dc_incr_syncpt_min(overlay->dc, 0, data->syncpt_max);
        }
    }
    mutex_unlock(&overlay->lock);

    /* unpin and deref previous front buffers */
    for (i = 0; i < *nr_unpin; i++) {
        nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
        nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
    }

    kfree(data);
}
/*
 * Release a job's gather buffer: unmap the CPU view first, then free the
 * nvmap handle.  Each pointer is cleared after release so the function is
 * idempotent and the job never holds a dangling reference.
 */
static void free_gathers(struct nvhost_job *job)
{
    if (job->gathers) {
        /* CPU mapping must be torn down before the handle is freed. */
        nvmap_munmap(job->gather_mem, job->gathers);
        job->gathers = NULL;
    }
    if (job->gather_mem) {
        nvmap_free(job->nvmap, job->gather_mem);
        job->gather_mem = NULL;
    }
}
/*
 * kref release callback for an nvhost job: unmap and free the gather
 * buffer, drop the nvmap client reference, then free the job itself
 * (allocated with vmalloc, hence vfree).
 */
static void job_free(struct kref *ref)
{
    struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);

    /* Unmap the CPU view before releasing the backing handle. */
    if (job->gathers)
        nvmap_munmap(job->gather_mem, job->gathers);
    if (job->gather_mem)
        nvmap_free(job->nvmap, job->gather_mem);
    if (job->nvmap)
        nvmap_client_put(job->nvmap);

    vfree(job);
}
/*
 * Allocate and initialize the Tegra30 3D context-save handler: a handler
 * struct plus a write-combined nvmap buffer holding the context-save
 * command stream.
 *
 * Returns a pointer to the embedded nvhost_hwctx_handler on success, or
 * NULL on failure (all partially-acquired resources released).
 *
 * Fix: the handler struct @p was leaked on both error paths; it is now
 * kfree()d before returning NULL.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
        u32 syncpt, u32 waitbase,
        struct nvhost_channel *ch)
{
    struct nvmap_client *nvmap;
    u32 *save_ptr;
    struct host1x_hwctx_handler *p;

    p = kmalloc(sizeof(*p), GFP_KERNEL);
    if (!p)
        return NULL;

    nvmap = nvhost_get_host(ch->dev)->nvmap;

    register_sets = tegra_gpu_register_sets();
    BUG_ON(register_sets == 0 || register_sets > 2);

    p->syncpt = syncpt;
    p->waitbase = waitbase;

    /* First pass computes p->save_size without emitting commands. */
    setup_save(p, NULL);

    p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32,
                              NVMAP_HANDLE_WRITE_COMBINE, 0);
    if (IS_ERR(p->save_buf)) {
        kfree(p);               /* was leaked here */
        return NULL;
    }

    p->save_slots = 6;
    if (register_sets == 2)
        p->save_slots += 2;

    save_ptr = nvmap_mmap(p->save_buf);
    if (!save_ptr) {
        nvmap_free(nvmap, p->save_buf);
        kfree(p);               /* was leaked here */
        return NULL;
    }

    p->save_phys = nvmap_pin(nvmap, p->save_buf);
    /* NOTE(review): nvmap_pin() result is not error-checked here —
     * confirm whether it can fail in this configuration. */

    /* Second pass writes the actual save command stream. */
    setup_save(p, save_ptr);

    p->h.alloc = ctx3d_alloc_v1;
    p->h.save_push = save_push_v1;
    p->h.save_service = NULL;
    p->h.get = nvhost_3dctx_get;
    p->h.put = nvhost_3dctx_put;

    return &p->h;
}
/*
 * Resolve a userspace nvmap buffer id into a pinned, display-owned
 * handle.
 *
 * The buffer is first referenced through the caller's nvmap context
 * (permission check), then duplicated into the dc_ext context so the
 * handle survives as long as display scans it out, then pinned for DMA.
 * An id of 0 means "no buffer" and yields *handle = NULL.
 *
 * On success *handle/*phys_addr are filled in and 0 is returned; on
 * failure a negative errno is returned and nothing is left referenced.
 */
int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id,
                            struct nvmap_handle_ref **handle,
                            dma_addr_t *phys_addr)
{
    struct tegra_dc_ext *ext = user->ext;
    struct nvmap_handle_ref *dup_ref;
    ulong user_ref;
    dma_addr_t pin_addr;

    if (!id) {
        *handle = NULL;
        *phys_addr = -1;
        return 0;
    }

    /* Reference through the user's own context first: proves the caller
     * actually has access to this buffer. */
    user_ref = nvmap_get_handle_user_id(user->nvmap, id);
    if (!user_ref)
        return -EACCES;

    /* Duplicate into the dc_ext driver's context so the buffer cannot be
     * freed out from under the display hardware. */
    dup_ref = nvmap_duplicate_handle_user_id(ext->nvmap, id);

    /* The temporary user-context reference is no longer needed. */
    nvmap_put_handle_user_id(user_ref);

    if (IS_ERR(dup_ref))
        return PTR_ERR(dup_ref);

    pin_addr = nvmap_pin(ext->nvmap, dup_ref);
    /* XXX this isn't correct for non-pointers... */
    if (IS_ERR((void *)pin_addr)) {
        nvmap_free(ext->nvmap, dup_ref);
        return PTR_ERR((void *)pin_addr);
    }

    *phys_addr = pin_addr;
    *handle = dup_ref;
    return 0;
}
/*
 * kref release callback for a 3D hardware context: tear down the restore
 * buffer in reverse order of setup (CPU unmap, unpin, free handle), then
 * free the context struct.  restore_virt may be NULL when the context was
 * allocated without a CPU mapping.
 */
void nvhost_3dctx_free(struct kref *ref)
{
    struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
    struct nvmap_client *nvmap = ctx->channel->dev->nvmap;

    if (ctx->restore_virt) {
        nvmap_munmap(ctx->restore, ctx->restore_virt);
        ctx->restore_virt = NULL;
    }

    nvmap_unpin(nvmap, ctx->restore);
    ctx->restore_phys = 0;

    nvmap_free(nvmap, ctx->restore);
    ctx->restore = NULL;

    kfree(ctx);
}
/*
 * Allocate a 3D hardware context and its restore buffer.
 *
 * The restore buffer is allocated from nvmap (write-combined when a CPU
 * mapping is requested via @map_restore, uncacheable otherwise), then
 * pinned for device access.  Returns the initialized context or NULL on
 * failure with everything released.
 *
 * Fix: on nvmap_alloc() failure the fail path previously passed the
 * ERR_PTR (or NULL) straight to nvmap_free(); ctx->restore is now
 * cleared before the unwind and nvmap_free() is only called on a real
 * handle.
 */
struct nvhost_hwctx *nvhost_3dctx_alloc_common(struct nvhost_channel *ch,
                                               bool map_restore)
{
    struct nvmap_client *nvmap = ch->dev->nvmap;
    struct nvhost_hwctx *ctx;

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return NULL;

    ctx->restore = nvmap_alloc(nvmap, nvhost_3dctx_restore_size * 4, 32,
                               map_restore ? NVMAP_HANDLE_WRITE_COMBINE
                                           : NVMAP_HANDLE_UNCACHEABLE);
    if (IS_ERR_OR_NULL(ctx->restore)) {
        /* Don't let the unwind hand an ERR_PTR to nvmap_free(). */
        ctx->restore = NULL;
        goto fail;
    }

    if (map_restore) {
        ctx->restore_virt = nvmap_mmap(ctx->restore);
        if (!ctx->restore_virt)
            goto fail;
    } else
        ctx->restore_virt = NULL;

    kref_init(&ctx->ref);
    ctx->channel = ch;
    ctx->valid = false;
    ctx->save = nvhost_3dctx_save_buf;
    ctx->save_incrs = nvhost_3dctx_save_incrs;
    ctx->save_thresh = nvhost_3dctx_save_thresh;
    ctx->save_slots = nvhost_3dctx_save_slots;

    ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
    if (IS_ERR_VALUE(ctx->restore_phys))
        goto fail;

    ctx->restore_size = nvhost_3dctx_restore_size;
    ctx->restore_incrs = nvhost_3dctx_restore_incrs;
    return ctx;

fail:
    /* Unwind in reverse order; each step only touches what exists. */
    if (map_restore && ctx->restore_virt) {
        nvmap_munmap(ctx->restore, ctx->restore_virt);
        ctx->restore_virt = NULL;
    }
    if (ctx->restore)
        nvmap_free(nvmap, ctx->restore);
    ctx->restore = NULL;
    kfree(ctx);
    return NULL;
}
/*
 * Early-boot initialization of the Tegra30 3D context handler (older,
 * global-state variant): size and allocate the shared context-save
 * buffer, map and pin it, emit the save command stream, and install the
 * v1 context ops on @h.
 *
 * Returns 0 on success or a negative errno; on failure the global
 * nvhost_3dctx_save_buf is left NULL.
 */
int __init nvhost_gr3d_t30_ctxhandler_init(struct nvhost_hwctx_handler *h)
{
    struct nvhost_channel *ch;
    struct nvmap_client *nvmap;
    u32 *save_ptr;

    ch = container_of(h, struct nvhost_channel, ctxhandler);
    nvmap = ch->dev->nvmap;

    register_sets = tegra_gpu_register_sets();
    BUG_ON(register_sets == 0 || register_sets > 2);

    /* First pass computes save_size without emitting commands. */
    setup_save(NULL);

    nvhost_3dctx_save_buf = nvmap_alloc(nvmap, save_size * 4, 32,
                                        NVMAP_HANDLE_WRITE_COMBINE);
    if (IS_ERR(nvhost_3dctx_save_buf)) {
        int err = PTR_ERR(nvhost_3dctx_save_buf);
        nvhost_3dctx_save_buf = NULL;
        return err;
    }

    nvhost_3dctx_save_slots = 6;
    if (register_sets == 2)
        nvhost_3dctx_save_slots += 2;

    save_ptr = nvmap_mmap(nvhost_3dctx_save_buf);
    if (!save_ptr) {
        nvmap_free(nvmap, nvhost_3dctx_save_buf);
        nvhost_3dctx_save_buf = NULL;
        return -ENOMEM;
    }

    /* NOTE(review): nvmap_pin() result is not error-checked — confirm. */
    save_phys = nvmap_pin(nvmap, nvhost_3dctx_save_buf);

    /* Second pass writes the actual save command stream. */
    setup_save(save_ptr);

    h->alloc = ctx3d_alloc_v1;
    h->save_push = save_push_v1;
    h->save_service = NULL;
    h->get = nvhost_3dctx_get;
    h->put = nvhost_3dctx_put;

    return 0;
}
/* Overlay window manipulation */

/*
 * Resolve a flip request's buff_id into a pinned, driver-owned nvmap
 * handle: validate the id against the user's nvmap context, duplicate
 * the handle into the overlay driver's context (so the buffer survives
 * while scanned out), then pin it and record the physical address in
 * @flip_win.  buff_id == 0 means "no buffer" and succeeds as a no-op.
 *
 * Returns 0 on success or a negative errno with nothing left pinned.
 */
static int tegra_overlay_pin_window(struct tegra_overlay_info *overlay,
                                    struct tegra_overlay_flip_win *flip_win,
                                    struct nvmap_client *user_nvmap)
{
    struct nvmap_handle_ref *win_dupe;
    struct nvmap_handle *win_handle;
    unsigned long buff_id = flip_win->attr.buff_id;

    if (!buff_id)
        return 0;

    /* Look up through the caller's context: acts as a permission check. */
    win_handle = nvmap_get_handle_id(user_nvmap, buff_id);
    if (win_handle == NULL) {
        dev_err(&overlay->ndev->dev, "%s: flip invalid "
                "handle %08lx\n", current->comm, buff_id);
        return -EPERM;
    }

    /* duplicate the new framebuffer's handle into the fb driver's
     * nvmap context, to ensure that the handle won't be freed as
     * long as it is in-use by the fb driver */
    win_dupe = nvmap_duplicate_handle_id(overlay->overlay_nvmap, buff_id);
    /* Drop the temporary lookup reference taken above. */
    nvmap_handle_put(win_handle);

    if (IS_ERR(win_dupe)) {
        dev_err(&overlay->ndev->dev, "couldn't duplicate handle\n");
        return PTR_ERR(win_dupe);
    }

    flip_win->handle = win_dupe;

    flip_win->phys_addr = nvmap_pin(overlay->overlay_nvmap, win_dupe);
    if (IS_ERR((void *)flip_win->phys_addr)) {
        dev_err(&overlay->ndev->dev, "couldn't pin handle\n");
        nvmap_free(overlay->overlay_nvmap, win_dupe);
        return PTR_ERR((void *)flip_win->phys_addr);
    }

    return 0;
}
/*
 * file_operations .release for a channel fd: drop everything the per-open
 * context acquired — module client registration, channel/hwctx refs, the
 * gather buffer mapping and handle, and the nvmap client — then free the
 * context.  Always returns 0.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
    struct nvhost_channel_userctx *priv = filp->private_data;

    trace_nvhost_channel_release(priv->ch->desc->name);

    filp->private_data = NULL;

    nvhost_module_remove_client(priv->ch->dev, &priv->ch->mod, priv);
    nvhost_putchannel(priv->ch, priv->hwctx);

    if (priv->hwctx)
        priv->ch->ctxhandler.put(priv->hwctx);

    /* gather_mem may hold an ERR_PTR from a failed alloc, hence the
     * IS_ERR_OR_NULL guard before freeing. */
    if (priv->gathers)
        nvmap_munmap(priv->gather_mem, priv->gathers);

    if (!IS_ERR_OR_NULL(priv->gather_mem))
        nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);

    nvmap_client_put(priv->nvmap);

    kfree(priv);
    return 0;
}
/*
 * Set the hardware cursor image (early variant: 32x32/64x64 only).
 *
 * Only the user that currently owns the cursor may set it, and only
 * while the head is enabled.  The new buffer is pinned before the old
 * handle is swapped out; the old handle is unpinned/freed after all
 * locks are dropped so nvmap is never called under cursor.lock/dc->lock.
 *
 * Returns 0 on success or a negative errno.
 */
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
                                  struct tegra_dc_ext_cursor_image *args)
{
    struct tegra_dc_ext *ext = user->ext;
    struct tegra_dc *dc = ext->dc;
    struct nvmap_handle_ref *handle, *old_handle;
    dma_addr_t phys_addr;
    u32 size;
    int ret;

    if (!user->nvmap)
        return -EFAULT;

    /* Exactly one of the two supported size flags must be set. */
    size = args->flags & (TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 |
                          TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64);
    if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
        size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
        return -EINVAL;

    mutex_lock(&ext->cursor.lock);

    if (ext->cursor.user != user) {
        ret = -EACCES;
        goto unlock;
    }

    if (!ext->enabled) {
        ret = -ENXIO;
        goto unlock;
    }

    old_handle = ext->cursor.cur_handle;

    ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
    if (ret)
        goto unlock;

    ext->cursor.cur_handle = handle;

    mutex_lock(&dc->lock);

    set_cursor_image_hw(dc, args, phys_addr);

    tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
    tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

    /* XXX sync here? */

    mutex_unlock(&dc->lock);

    mutex_unlock(&ext->cursor.lock);

    /* Release the previous cursor buffer outside all locks. */
    if (old_handle) {
        nvmap_unpin(ext->nvmap, old_handle);
        nvmap_free(ext->nvmap, old_handle);
    }

    return 0;

unlock:
    mutex_unlock(&ext->cursor.lock);
    return ret;
}
/*
 * mem_mgr abstraction shim: release one handle reference.  The opaque
 * mem_mgr/mem_handle pair is just an nvmap client/handle-ref in disguise.
 */
void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle)
{
    struct nvmap_client *client = (struct nvmap_client *)mgr;
    struct nvmap_handle_ref *ref = (struct nvmap_handle_ref *)handle;

    nvmap_free(client, ref);
}
/*
 * Deferred flip worker (cursor-aware variant with per-plane handles).
 *
 * A flip may be skipped when a cursor-flagged window still has other
 * flips pending (nr_pending_flips did not reach zero).  Note skip_flip
 * latches: once set for one window it stays set for the remaining
 * windows of this request and suppresses the update/sync step entirely.
 * When skipping, the NEW buffers (flip_win->handle[]) are unpinned
 * instead of the current front buffers, since they were never latched.
 *
 * Owns @data and frees it before returning.
 */
static void tegra_dc_ext_flip_worker(struct work_struct *work)
{
    struct tegra_dc_ext_flip_data *data =
        container_of(work, struct tegra_dc_ext_flip_data, work);
    struct tegra_dc_ext *ext = data->ext;
    struct tegra_dc_win *wins[DC_N_WINDOWS];
    struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS *
                                           TEGRA_DC_NUM_PLANES];
    struct nvmap_handle_ref *old_handle;
    int i, nr_unpin = 0, nr_win = 0;
    bool skip_flip = false;

    for (i = 0; i < DC_N_WINDOWS; i++) {
        struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
        int index = flip_win->attr.index;
        struct tegra_dc_win *win;
        struct tegra_dc_ext_win *ext_win;

        if (index < 0)
            continue;

        win = tegra_dc_get_window(ext->dc, index);
        ext_win = &ext->win[index];

        /* Skip this flip if a cursor window still has newer flips
         * queued behind it. */
        if (!(atomic_dec_and_test(&ext_win->nr_pending_flips)) &&
            (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_CURSOR))
            skip_flip = true;

        if (skip_flip)
            old_handle = flip_win->handle[TEGRA_DC_Y];
        else
            old_handle = ext_win->cur_handle[TEGRA_DC_Y];

        if (old_handle) {
            int j;
            /* Collect every plane of the buffer being retired. */
            for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
                if (skip_flip)
                    old_handle = flip_win->handle[j];
                else
                    old_handle = ext_win->cur_handle[j];

                if (!old_handle)
                    continue;

                unpin_handles[nr_unpin++] = old_handle;
            }
        }

        if (!skip_flip)
            tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);

        wins[nr_win++] = win;
    }

    if (!skip_flip) {
        tegra_dc_update_windows(wins, nr_win);
        /* TODO: implement swapinterval here */
        tegra_dc_sync_windows(wins, nr_win);
    }

    /* Advance the per-window syncpoints even when the flip was skipped,
     * so waiters are never stranded. */
    for (i = 0; i < DC_N_WINDOWS; i++) {
        struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
        int index = flip_win->attr.index;

        if (index < 0)
            continue;

        tegra_dc_incr_syncpt_min(ext->dc, index, flip_win->syncpt_max);
    }

    /* unpin and deref previous front buffers */
    for (i = 0; i < nr_unpin; i++) {
        nvmap_unpin(ext->nvmap, unpin_handles[i]);
        nvmap_free(ext->nvmap, unpin_handles[i]);
    }

    kfree(data);
}
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user, struct tegra_dc_ext_cursor_image *args) { struct tegra_dc_ext *ext = user->ext; struct tegra_dc *dc = ext->dc; struct nvmap_handle_ref *handle, *old_handle; dma_addr_t phys_addr; u32 size; int ret; if (!user->nvmap) return -EFAULT; size = TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE(args->flags); #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC) if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 && size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64) return -EINVAL; #else if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 && size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64 && size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_128x128 && size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_256x256) return -EINVAL; #endif #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC) if (args->flags && TEGRA_DC_EXT_CURSOR_FLAGS_RGBA_NORMAL) return -EINVAL; #endif mutex_lock(&ext->cursor.lock); if (ext->cursor.user != user) { ret = -EACCES; goto unlock; } if (!ext->enabled) { ret = -ENXIO; goto unlock; } old_handle = ext->cursor.cur_handle; ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr); if (ret) goto unlock; ext->cursor.cur_handle = handle; mutex_lock(&dc->lock); tegra_dc_io_start(dc); tegra_dc_hold_dc_out(dc); set_cursor_image_hw(dc, args, phys_addr); tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); tegra_dc_release_dc_out(dc); tegra_dc_io_end(dc); /* XXX sync here? */ mutex_unlock(&dc->lock); mutex_unlock(&ext->cursor.lock); if (old_handle) { nvmap_unpin(ext->nvmap, old_handle); nvmap_free(ext->nvmap, old_handle); } return 0; unlock: mutex_unlock(&ext->cursor.lock); return ret; }
/*
 * Pin every window referenced by a flip request and queue the flip
 * (later variant: serialized by tegra_flip_lock, rejects flips while the
 * controller is disabled, and resets the EMC clock before the worker
 * runs so the flip happens glitch-free).
 *
 * On success ownership of @data passes to the worker; on error all
 * handles pinned so far are released and @data freed here.
 * Returns 0 on success or a negative errno.
 */
static int tegra_overlay_flip(struct tegra_overlay_info *overlay,
                              struct tegra_overlay_flip_args *args,
                              struct nvmap_client *user_nvmap)
{
    struct tegra_overlay_flip_data *data;
    struct tegra_overlay_flip_win *flip_win;
    u32 syncpt_max;
    int i, err;

    if (WARN_ON(!overlay->ndev))
        return -EFAULT;

    mutex_lock(&tegra_flip_lock);
    if (!overlay->dc->enabled) {
        mutex_unlock(&tegra_flip_lock);
        return -EFAULT;
    }

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (data == NULL) {
        dev_err(&overlay->ndev->dev, "can't allocate memory for flip\n");
        mutex_unlock(&tegra_flip_lock);
        return -ENOMEM;
    }

    INIT_WORK(&data->work, tegra_overlay_flip_worker);
    data->overlay = overlay;
    data->flags = args->flags;

    /* Copy per-window attributes and pin the backing buffers;
     * attr.index == -1 marks an unused slot. */
    for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
        flip_win = &data->win[i];
        memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
        if (flip_win->attr.index == -1)
            continue;
        err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap);
        if (err < 0) {
            dev_err(&overlay->ndev->dev, "error setting window attributes\n");
            goto surf_err;
        }
    }

    syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc);
    data->syncpt_max = syncpt_max;

    queue_work(overlay->flip_wq, &data->work);

    /*
     * Before the queued flip_wq get scheduled, we set the EMC clock to the
     * default value in order to do FLIP without glitch.
     */
    tegra_dc_set_default_emc(overlay->dc);

    /* Report the syncpoint the caller can wait on. */
    args->post_syncpt_val = syncpt_max;
    args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc);
    mutex_unlock(&tegra_flip_lock);

    return 0;

surf_err:
    /* Unwind only the windows pinned before the failure. */
    while (i--) {
        if (data->win[i].handle) {
            nvmap_unpin(overlay->overlay_nvmap, data->win[i].handle);
            nvmap_free(overlay->overlay_nvmap, data->win[i].handle);
        }
    }
    kfree(data);
    mutex_unlock(&tegra_flip_lock);
    return err;
}
/*
 * Deferred flip worker (later variant: blend reordering, DIDIM n-shot).
 *
 * Programs new window attributes, updates either the reordered full
 * window set or just the flipped windows, then completes the flip.  In
 * one-shot + n-shot mode, completion (syncpoint increment, unpinning,
 * freeing @data) is delegated to tegra_overlay_n_shot(), which may keep
 * @data alive to resend the frame; otherwise it is done inline here.
 */
static void tegra_overlay_flip_worker(struct work_struct *work)
{
    struct tegra_overlay_flip_data *data =
        container_of(work, struct tegra_overlay_flip_data, work);
    struct tegra_overlay_info *overlay = data->overlay;
    struct tegra_dc_win *win;
    struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
    struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
    int i, nr_win = 0, nr_unpin = 0;

    /* NOTE(review): redundant — data was already initialized to the same
     * value above. */
    data = container_of(work, struct tegra_overlay_flip_data, work);

    for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
        struct tegra_overlay_flip_win *flip_win = &data->win[i];
        int idx = flip_win->attr.index;

        if (idx == -1)
            continue;

        win = tegra_dc_get_window(overlay->dc, idx);
        if (!win)
            continue;

        /* Collect the outgoing front buffer — but not during a DIDIM
         * repeat, whose buffers are still owned by the n-shot state.
         * NOTE(review): `win->flags &&` is a logical test (any flag
         * set); confirm a bitwise TEGRA_WIN_FLAG_* test wasn't meant. */
        if (win->flags && win->cur_handle && !data->didim_work)
            unpin_handles[nr_unpin++] = win->cur_handle;

        tegra_overlay_set_windowattr(overlay, win, &data->win[i]);

        wins[nr_win++] = win;

#if 0
        if (flip_win->attr.pre_syncpt_id < 0)
            continue;
        printk("%08x %08x\n", flip_win->attr.pre_syncpt_id,
               flip_win->attr.pre_syncpt_val);
        nvhost_syncpt_wait_timeout(&nvhost_get_host(overlay->ndev)->syncpt,
                                   flip_win->attr.pre_syncpt_id,
                                   flip_win->attr.pre_syncpt_val,
                                   msecs_to_jiffies(500));
#endif
    }

    if (data->flags & TEGRA_OVERLAY_FLIP_FLAG_BLEND_REORDER) {
        /* Reordering affects blending across ALL windows, so update the
         * complete window set, not just the flipped ones. */
        struct tegra_dc_win *dcwins[DC_N_WINDOWS];

        for (i = 0; i < DC_N_WINDOWS; i++)
            dcwins[i] = tegra_dc_get_window(overlay->dc, i);

        tegra_overlay_blend_reorder(&overlay->blend, dcwins);
        tegra_dc_update_windows(dcwins, DC_N_WINDOWS);
        tegra_dc_sync_windows(dcwins, DC_N_WINDOWS);
    } else {
        tegra_dc_update_windows(wins, nr_win);
        /* TODO: implement swapinterval here */
        tegra_dc_sync_windows(wins, nr_win);
    }

    if ((overlay->dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) &&
        (overlay->dc->out->flags & TEGRA_DC_OUT_N_SHOT_MODE)) {
        /* n-shot path takes over completion and ownership of @data. */
        tegra_overlay_n_shot(data, unpin_handles, &nr_unpin);
    } else {
        tegra_dc_incr_syncpt_min(overlay->dc, 0, data->syncpt_max);

        /* unpin and deref previous front buffers */
        for (i = 0; i < nr_unpin; i++) {
            nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
            nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
        }

        kfree(data);
    }
}
/*
 * Deferred flip worker (window enable/disable tracking variant).
 *
 * Tracks per-window enable transitions: a window becoming enabled bumps
 * the active-window count immediately, while disables are batched and
 * applied only after the flip has latched, so the count never undershoots
 * while the hardware still scans out.  A window is considered enabled
 * when the flip carries a buffer handle.
 *
 * Owns @data and frees it before returning.
 */
static void tegra_dc_ext_flip_worker(struct work_struct *work)
{
    struct tegra_dc_ext_flip_data *data =
        container_of(work, struct tegra_dc_ext_flip_data, work);
    struct tegra_dc_ext *ext = data->ext;
    struct tegra_dc_win *wins[DC_N_WINDOWS];
    struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS];
    int i, nr_unpin = 0, nr_win = 0, nr_disable = 0;

    for (i = 0; i < DC_N_WINDOWS; i++) {
        struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
        int index = flip_win->attr.index;
        struct tegra_dc_win *win;
        struct tegra_dc_ext_win *ext_win;
        bool old_ena, new_ena;

        if (index < 0)
            continue;

        win = tegra_dc_get_window(ext->dc, index);
        ext_win = &ext->win[index];

        old_ena = ext->win[index].enabled;
        new_ena = flip_win->handle != NULL;
        if (old_ena != new_ena) {
            if (new_ena)
                process_window_change(ext, 1);
            else
                /* Defer the decrement until after the flip latches. */
                nr_disable++;
        }
        ext->win[index].enabled = new_ena;

        /* Collect the outgoing front buffer for release after latch. */
        if (old_ena && ext_win->cur_handle)
            unpin_handles[nr_unpin++] = ext_win->cur_handle;

        tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);

        wins[nr_win++] = win;
    }

    tegra_dc_update_windows(wins, nr_win);
    /* TODO: implement swapinterval here */
    tegra_dc_sync_windows(wins, nr_win);

    /* Advance the per-window syncpoints so waiters are released. */
    for (i = 0; i < DC_N_WINDOWS; i++) {
        struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
        int index = flip_win->attr.index;

        if (index < 0)
            continue;

        tegra_dc_incr_syncpt_min(ext->dc, index, flip_win->syncpt_max);
    }

    /* unpin and deref previous front buffers */
    for (i = 0; i < nr_unpin; i++) {
        nvmap_unpin(ext->nvmap, unpin_handles[i]);
        nvmap_free(ext->nvmap, unpin_handles[i]);
    }

    /* Apply the batched disables now that the flip is on screen. */
    if (nr_disable)
        process_window_change(ext, -nr_disable);

    kfree(data);
}
/*
 * Deferred flip worker (per-plane variant: each window may carry up to
 * TEGRA_DC_NUM_PLANES buffer handles, e.g. Y/U/V).
 *
 * For every enabled window, all plane handles of the outgoing front
 * buffer are collected, the new attributes are programmed, and after the
 * flip latches the per-window syncpoints are advanced and the old
 * buffers unpinned and freed.  Owns @data and frees it before returning.
 */
static void tegra_dc_ext_flip_worker(struct work_struct *work)
{
    struct tegra_dc_ext_flip_data *data =
        container_of(work, struct tegra_dc_ext_flip_data, work);
    struct tegra_dc_ext *ext = data->ext;
    struct tegra_dc_win *wins[DC_N_WINDOWS];
    struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS *
                                           TEGRA_DC_NUM_PLANES];
    int i, nr_unpin = 0, nr_win = 0;

    for (i = 0; i < DC_N_WINDOWS; i++) {
        struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
        int index = flip_win->attr.index;
        struct tegra_dc_win *win;
        struct tegra_dc_ext_win *ext_win;

        if (index < 0)
            continue;

        win = tegra_dc_get_window(ext->dc, index);
        ext_win = &ext->win[index];

        /* Only a window that was actually scanning out has front
         * buffers worth retiring. */
        if (win->flags & TEGRA_WIN_FLAG_ENABLED) {
            int j;
            for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
                if (!ext_win->cur_handle[j])
                    continue;

                unpin_handles[nr_unpin++] = ext_win->cur_handle[j];
            }
        }

        tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);

        wins[nr_win++] = win;
    }

    tegra_dc_update_windows(wins, nr_win);
    /* TODO: implement swapinterval here */
    tegra_dc_sync_windows(wins, nr_win);

    /* Advance the per-window syncpoints so waiters are released. */
    for (i = 0; i < DC_N_WINDOWS; i++) {
        struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
        int index = flip_win->attr.index;

        if (index < 0)
            continue;

        tegra_dc_incr_syncpt_min(ext->dc, index, flip_win->syncpt_max);
    }

    /* unpin and deref previous front buffers */
    for (i = 0; i < nr_unpin; i++) {
        nvmap_unpin(ext->nvmap, unpin_handles[i]);
        nvmap_free(ext->nvmap, unpin_handles[i]);
    }

    kfree(data);
}
/*
 * ioctl-side entry point for a flip: validate the request, pin every
 * referenced plane buffer (Y, and optionally U/V), reserve per-window
 * syncpoint values, and queue the work to a per-window flip workqueue.
 *
 * On success ownership of @data passes to tegra_dc_ext_flip_worker().
 * On failure every handle pinned so far is unpinned/freed (the `unlock:`
 * label deliberately falls through into `fail_pin:`) and @data is freed.
 *
 * Returns 0 on success or a negative errno.
 */
static int tegra_dc_ext_flip(struct tegra_dc_ext_user *user,
                             struct tegra_dc_ext_flip *args)
{
    struct tegra_dc_ext *ext = user->ext;
    struct tegra_dc_ext_flip_data *data;
    int work_index;
    int i, ret = 0;

    if (!user->nvmap)
        return -EFAULT;

    ret = sanitize_flip_args(user, args);
    if (ret)
        return ret;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    INIT_WORK(&data->work, tegra_dc_ext_flip_worker);
    data->ext = ext;

    /* Pin each window's planes: Y is mandatory, U/V only when their
     * buff_ids are supplied.  kzalloc above guarantees unused handle
     * slots are NULL for the fail_pin unwind. */
    for (i = 0; i < DC_N_WINDOWS; i++) {
        struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
        int index = args->win[i].index;

        memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));

        if (index < 0)
            continue;

        ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id,
                                      &flip_win->handle[TEGRA_DC_Y],
                                      &flip_win->phys_addr);
        if (ret)
            goto fail_pin;

        if (flip_win->attr.buff_id_u) {
            ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id_u,
                                          &flip_win->handle[TEGRA_DC_U],
                                          &flip_win->phys_addr_u);
            if (ret)
                goto fail_pin;
        } else {
            flip_win->handle[TEGRA_DC_U] = NULL;
            flip_win->phys_addr_u = 0;
        }

        if (flip_win->attr.buff_id_v) {
            ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id_v,
                                          &flip_win->handle[TEGRA_DC_V],
                                          &flip_win->phys_addr_v);
            if (ret)
                goto fail_pin;
        } else {
            flip_win->handle[TEGRA_DC_V] = NULL;
            flip_win->phys_addr_v = 0;
        }
    }

    ret = lock_windows_for_flip(user, args);
    if (ret)
        goto fail_pin;

    if (!ext->enabled) {
        ret = -ENXIO;
        goto unlock;
    }

    /* Reserve a syncpoint value per flipped window.
     * NOTE(review): work_index is only assigned inside this loop —
     * presumably sanitize_flip_args() guarantees at least one valid
     * window index; confirm. */
    for (i = 0; i < DC_N_WINDOWS; i++) {
        u32 syncpt_max;
        int index = args->win[i].index;

        if (index < 0)
            continue;

        syncpt_max = tegra_dc_incr_syncpt_max(ext->dc, index);
        data->win[i].syncpt_max = syncpt_max;

        /*
         * Any of these windows' syncpoints should be equivalent for
         * the client, so we just send back an arbitrary one of them
         */
        args->post_syncpt_val = syncpt_max;
        args->post_syncpt_id = tegra_dc_get_syncpt_id(ext->dc, index);
        work_index = index;
    }
    queue_work(ext->win[work_index].flip_wq, &data->work);

    unlock_windows_for_flip(user, args);

    return 0;

unlock:
    unlock_windows_for_flip(user, args);

fail_pin:
    /* Release every plane handle pinned so far. */
    for (i = 0; i < DC_N_WINDOWS; i++) {
        int j;
        for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
            if (!data->win[i].handle[j])
                continue;

            nvmap_unpin(ext->nvmap, data->win[i].handle[j]);
            nvmap_free(ext->nvmap, data->win[i].handle[j]);
        }
    }
    kfree(data);

    return ret;
}
/*
 * Free the CPU-side save data of a 3D hardware context.
 *
 * NOTE(review): ctx->restore is passed in the nvmap client position,
 * matching ctx3d_free() in this file but not the (client, handle)
 * pattern used elsewhere — presumably an older nvmap API revision;
 * confirm before changing.
 */
static void ctx3d_deinit(struct nvhost_hwctx *ctx)
{
    nvmap_free(ctx->restore, ctx->save_cpu_data);
}