static int tegra_overlay_flip(struct tegra_overlay_info *overlay, struct tegra_overlay_flip_args *args, struct nvmap_client *user_nvmap) { struct tegra_overlay_flip_data *data; struct tegra_overlay_flip_win *flip_win; u32 syncpt_max; int i, err; if (WARN_ON(!overlay->ndev)) return -EFAULT; data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) { dev_err(&overlay->ndev->dev, "can't allocate memory for flip\n"); return -ENOMEM; } INIT_WORK(&data->work, tegra_overlay_flip_worker); data->overlay = overlay; for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) { flip_win = &data->win[i]; memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr)); if (flip_win->attr.index == -1) continue; err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap); if (err < 0) { dev_err(&overlay->ndev->dev, "error setting window attributes\n"); goto surf_err; } } syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc); data->syncpt_max = syncpt_max; queue_work(overlay->flip_wq, &data->work); args->post_syncpt_val = syncpt_max; args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc); return 0; surf_err: while (i--) { if (data->win[i].handle) { nvmap_unpin(overlay->overlay_nvmap, data->win[i].handle); nvmap_free(overlay->overlay_nvmap, data->win[i].handle); } } kfree(data); return err; }
static int tegra_dc_ext_flip(struct tegra_dc_ext_user *user, struct tegra_dc_ext_flip *args) { struct tegra_dc_ext *ext = user->ext; struct tegra_dc_ext_flip_data *data; int work_index; int i, ret = 0; if (!user->nvmap) return -EFAULT; ret = sanitize_flip_args(user, args); if (ret) return ret; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; INIT_WORK(&data->work, tegra_dc_ext_flip_worker); data->ext = ext; for (i = 0; i < DC_N_WINDOWS; i++) { struct tegra_dc_ext_flip_win *flip_win = &data->win[i]; int index = args->win[i].index; memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr)); if (index < 0) continue; ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id, &flip_win->handle[TEGRA_DC_Y], &flip_win->phys_addr); if (ret) goto fail_pin; if (flip_win->attr.buff_id_u) { ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id_u, &flip_win->handle[TEGRA_DC_U], &flip_win->phys_addr_u); if (ret) goto fail_pin; } else { flip_win->handle[TEGRA_DC_U] = NULL; flip_win->phys_addr_u = 0; } if (flip_win->attr.buff_id_v) { ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id_v, &flip_win->handle[TEGRA_DC_V], &flip_win->phys_addr_v); if (ret) goto fail_pin; } else { flip_win->handle[TEGRA_DC_V] = NULL; flip_win->phys_addr_v = 0; } } ret = lock_windows_for_flip(user, args); if (ret) goto fail_pin; if (!ext->enabled) { ret = -ENXIO; goto unlock; } for (i = 0; i < DC_N_WINDOWS; i++) { u32 syncpt_max; int index = args->win[i].index; if (index < 0) continue; syncpt_max = tegra_dc_incr_syncpt_max(ext->dc, index); data->win[i].syncpt_max = syncpt_max; /* * Any of these windows' syncpoints should be equivalent for * the client, so we just send back an arbitrary one of them */ args->post_syncpt_val = syncpt_max; args->post_syncpt_id = tegra_dc_get_syncpt_id(ext->dc, index); work_index = index; } queue_work(ext->win[work_index].flip_wq, &data->work); unlock_windows_for_flip(user, args); return 0; unlock: unlock_windows_for_flip(user, args); 
fail_pin: for (i = 0; i < DC_N_WINDOWS; i++) { int j; for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) { if (!data->win[i].handle[j]) continue; nvmap_unpin(ext->nvmap, data->win[i].handle[j]); nvmap_free(ext->nvmap, data->win[i].handle[j]); } } kfree(data); return ret; }
/*
 * Pin the user-supplied surfaces and queue a flip of the overlay windows.
 *
 * The flip is serialized against concurrent flips and display disable via
 * tegra_flip_lock: dc->enabled is checked and queue_work() is called with
 * the lock held, so a racing blank cannot disable the controller in
 * between.  On success the syncpoint id/value the client must wait on are
 * returned through @args->post_syncpt_id / @args->post_syncpt_val.
 *
 * Returns 0 on success, -EFAULT when the device is missing or the dc is
 * disabled, -ENOMEM on allocation failure, or the error from pinning a
 * window (with all previously pinned windows released).
 */
static int tegra_overlay_flip(struct tegra_overlay_info *overlay,
			      struct tegra_overlay_flip_args *args,
			      struct nvmap_client *user_nvmap)
{
	struct tegra_overlay_flip_data *data;
	struct tegra_overlay_flip_win *flip_win;
	u32 syncpt_max;
	int i, err;

	if (WARN_ON(!overlay->ndev))
		return -EFAULT;

	/* Lock before the enabled check; held until after queue_work() */
	mutex_lock(&tegra_flip_lock);
	if (!overlay->dc->enabled) {
		mutex_unlock(&tegra_flip_lock);
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL) {
		dev_err(&overlay->ndev->dev,
			"can't allocate memory for flip\n");
		mutex_unlock(&tegra_flip_lock);
		return -ENOMEM;
	}

	INIT_WORK(&data->work, tegra_overlay_flip_worker);
	data->overlay = overlay;
	data->flags = args->flags;

	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
		flip_win = &data->win[i];

		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));

		/* index == -1 means this window is not part of the flip */
		if (flip_win->attr.index == -1)
			continue;

		err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap);
		if (err < 0) {
			dev_err(&overlay->ndev->dev,
				"error setting window attributes\n");
			goto surf_err;
		}
	}

	/* Reserve a syncpoint value for this flip before queueing the work */
	syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc);
	data->syncpt_max = syncpt_max;

	queue_work(overlay->flip_wq, &data->work);

	/*
	 * Before the queued flip_wq get scheduled, we set the EMC clock to the
	 * default value in order to do FLIP without glitch.
	 */
	tegra_dc_set_default_emc(overlay->dc);

	args->post_syncpt_val = syncpt_max;
	args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc);
	mutex_unlock(&tegra_flip_lock);

	return 0;

surf_err:
	/* Unpin/free every window pinned before the failing one */
	while (i--) {
		if (data->win[i].handle) {
			nvmap_unpin(overlay->overlay_nvmap,
				    data->win[i].handle);
			nvmap_free(overlay->overlay_nvmap,
				   data->win[i].handle);
		}
	}
	kfree(data);
	mutex_unlock(&tegra_flip_lock);
	return err;
}