/*
 * adreno_drawctxt_wait_global() - wait for a timestamp on the global context
 * @adreno_dev: the adreno device owning the context
 * @context: context whose wait queue is used while sleeping
 * @timestamp: global (KGSL_MEMSTORE_GLOBAL) timestamp to wait for
 * @timeout: number of milliseconds to wait (0 = wait forever)
 *
 * Must be called with the device mutex held; the mutex is dropped while
 * sleeping and re-taken before returning.  Returns 0 on success (or if the
 * context is already invalid), -EINVAL if the context reference cannot be
 * taken, or -ETIMEDOUT if the timeout expires.
 */
static int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
		struct kgsl_context *context, uint32_t timestamp,
		unsigned int timeout)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret = 0;

	/* Needs to hold the device mutex */
	BUG_ON(!mutex_is_locked(&device->mutex));

	/*
	 * Take a reference for the duration of the wait; on success the
	 * reference is released by global_wait_callback when the event fires.
	 */
	if (!_kgsl_context_get(context)) {
		ret = -EINVAL;
		goto done;
	}

	/*
	 * If the context is invalid then return immediately - we may end up
	 * waiting for a timestamp that will never come
	 */
	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
		kgsl_context_put(context);
		goto done;
	}

	trace_adreno_drawctxt_wait_start(KGSL_MEMSTORE_GLOBAL, timestamp);

	ret = kgsl_add_event(device, &device->global_events, timestamp,
		global_wait_callback, (void *) drawctxt);
	if (ret) {
		kgsl_context_put(context);
		goto done;
	}

	/* Drop the device mutex while sleeping so the GPU can make progress */
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	if (timeout) {
		/*
		 * wait_event_timeout() returns 0 on timeout, >0 (remaining
		 * jiffies) when the condition became true - normalize that
		 * to -ETIMEDOUT / 0.
		 */
		ret = (int) wait_event_timeout(drawctxt->waiting,
			_check_global_timestamp(device, drawctxt, timestamp),
			msecs_to_jiffies(timeout));

		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret > 0)
			ret = 0;
	} else {
		wait_event(drawctxt->waiting,
			_check_global_timestamp(device, drawctxt, timestamp));
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	/*
	 * On timeout, cancel the still-pending event; this fires the callback
	 * which releases the context reference taken above.
	 */
	if (ret)
		kgsl_cancel_events_timestamp(device, &device->global_events,
			timestamp);

done:
	trace_adreno_drawctxt_wait_done(KGSL_MEMSTORE_GLOBAL, timestamp, ret);
	return ret;
}
/*
 * kgsl_devfreq_target() - devfreq_dev_profile.target callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @freq: in: requested frequency; out: frequency actually selected
 * @flags: DEVFREQ_FLAG_* hints from the governor
 *
 * Expects the device mutex to be unlocked on entry.  Selects the power
 * level matching *freq, or - when the frequency is unchanged - adjusts the
 * bus vote according to the FAST/SLOW hints.  An active, unexpired power
 * constraint can veto a downward level change.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/*
		 * Scan from min_pwrlevel down to max_pwrlevel for the first
		 * level that can satisfy the request; default to max if none
		 * matches.  (Assumes min_pwrlevel >= max_pwrlevel as indices
		 * - NOTE(review): confirm pwrlevels[] ordering.)
		 */
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		/* Same GPU freq: honor bus speed hints instead */
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
			(pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
				0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
			(pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
				0 : SLOW_BUS;
		/* Only re-vote the bus when the modifier actually changed */
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	/*
	 * Skip the level change while an unexpired constraint demands at
	 * least this power level; otherwise apply the change and clear the
	 * constraint.
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
		(!time_after(jiffies, pwr->constraint.expires)) &&
		(level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		kgsl_pwrctrl_pwrlevel_change(device, level);
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;
		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
/*
 * kgsl_devfreq_get_dev_status() - devfreq_dev_profile.get_dev_status callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @stat: filled in with busy/total time and current frequency
 *
 * This function expects the device mutex to be unlocked.  Accumulated power
 * statistics are reported and then reset; when bus control is enabled the
 * values are also cached in last_status for kgsl_busmon_get_dev_status().
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwrctrl;
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;
	pwrctrl = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/*
	 * If the GPU clock is on grab the latest power counter
	 * values.  Otherwise the most recent ACTIVE values will
	 * already be stored in accum_stats.
	 */
	kgsl_pwrscale_update_stats(device);

	tmp = ktime_to_us(ktime_get());
	/* total_time is the wall time since the previous query */
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	/*
	 * keep the latest devfreq_dev_status values
	 * and vbif counters data
	 * to be (re)used by kgsl_busmon_get_dev_status()
	 */
	if (pwrctrl->bus_control) {
		struct xstats *last_b =
			(struct xstats *)last_status.private_data;

		last_status.total_time = stat->total_time;
		last_status.busy_time = stat->busy_time;
		last_status.current_frequency = stat->current_frequency;

		last_b->ram_time = device->pwrscale.accum_stats.ram_time;
		last_b->ram_wait = device->pwrscale.accum_stats.ram_wait;
		last_b->mod = device->pwrctrl.bus_mod;
	}

	kgsl_pwrctrl_busy_time(device, stat->total_time, stat->busy_time);
	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);

	/* Reset the accumulators for the next interval */
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * kgsl_devfreq_get_dev_status() - devfreq_dev_profile.get_dev_status callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @stat: filled in with busy/total time and current frequency
 *
 * This function expects the device mutex to be unlocked.  Accumulated power
 * statistics are reported and then reset for the next interval.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/* make sure we don't turn on clocks just to read stats */
	if (device->state == KGSL_STATE_ACTIVE) {
		struct kgsl_power_stats extra;
		device->ftbl->power_stats(device, &extra);
		device->pwrscale.accum_stats.busy_time += extra.busy_time;
		device->pwrscale.accum_stats.ram_time += extra.ram_time;
		device->pwrscale.accum_stats.ram_wait += extra.ram_wait;
	}

	tmp = ktime_to_us(ktime_get());
	/* total_time is the wall time since the previous query */
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	/* Export the bus statistics when the caller supplied an xstats */
	if (stat->private_data) {
		struct xstats *b = (struct xstats *)stat->private_data;
		b->ram_time = device->pwrscale.accum_stats.ram_time;
		b->ram_wait = device->pwrscale.accum_stats.ram_wait;
		b->mod = device->pwrctrl.bus_mod;
	}

#if defined (CONFIG_SYSTEM_LOAD_ANALYZER)
	{
		/* Report GPU utilization (busy/total, scaled by 1000) */
		unsigned long long busy_time_x1000;

		if (stat->total_time != 0) {
			busy_time_x1000 =
				pwrscale->accum_stats.busy_time * 1000;
			do_div(busy_time_x1000, stat->total_time);
			store_external_load_factor(GPU_UTILIZATION,
				busy_time_x1000);
		}
	}
#endif

	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);

	/* Reset the accumulators for the next interval */
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * pm_dump_set() - debugfs write handler that triggers a postmortem dump
 * @data: opaque pointer to the struct kgsl_device
 * @val: any non-zero value requests a dump
 *
 * Always returns 0; a zero @val is a no-op.
 */
static int pm_dump_set(void *data, u64 val)
{
	struct kgsl_device *device = data;

	if (!val)
		return 0;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	kgsl_postmortem_dump(device, 1);
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @freq: in: requested frequency; out: frequency actually selected
 * @flags: DEVFREQ_FLAG_* hints from the governor
 *
 * This function expects the device mutex to be unlocked.  Selects the power
 * level matching *freq, or - when the frequency is unchanged - adjusts the
 * bus vote according to the FAST/SLOW hints.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/*
		 * Scan from min_pwrlevel down to max_pwrlevel for the first
		 * level that can satisfy the request; default to max if none
		 * matches.  (Assumes min_pwrlevel >= max_pwrlevel as indices
		 * - NOTE(review): confirm pwrlevels[] ordering.)
		 */
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		/*
		 * Signal for faster or slower bus.  If KGSL isn't already
		 * running at the desired speed for the given level, modify
		 * its vote.
		 */
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
			(pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
				0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
			(pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
				0 : SLOW_BUS;
		/* Only re-vote the bus when the modifier actually changed */
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	kgsl_pwrctrl_pwrlevel_change(device, level);
	/* Report back the frequency that is actually in effect */
	*freq = kgsl_pwrctrl_active_freq(pwr);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
/* * kgsl_devfreq_get_cur_freq - devfreq_dev_profile.get_cur_freq callback * @dev: see devfreq.h * @freq: see devfreq.h * @flags: see devfreq.h * * This function expects the device mutex to be unlocked. */ int kgsl_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) { struct kgsl_device *device = dev_get_drvdata(dev); if (device == NULL) return -ENODEV; if (freq == NULL) return -EINVAL; kgsl_mutex_lock(&device->mutex, &device->mutex_owner); *freq = kgsl_pwrctrl_active_freq(&device->pwrctrl); kgsl_mutex_unlock(&device->mutex, &device->mutex_owner); return 0; }
/**
 * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
 * @device: Pointer to the KGSL device structure for the GPU
 * @context: Pointer to the KGSL context structure
 *
 * Invalidate the context and remove all queued commands and cancel any pending
 * waiters.  Note the lock ordering: the drawctxt mutex is dropped before the
 * device mutex is taken for each cancelled command batch, then re-acquired,
 * so the queue head must be re-checked on every iteration.
 */
void adreno_drawctxt_invalidate(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);

	trace_adreno_drawctxt_invalidate(drawctxt);

	drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;

	/* Clear the pending queue */
	mutex_lock(&drawctxt->mutex);

	/*
	 * set the timestamp to the last value since the context is invalidated
	 * and we want the pending events for this context to go away
	 */
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
			drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
			drawctxt->timestamp);

	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
			ADRENO_CONTEXT_CMDQUEUE_SIZE;

		/*
		 * Drop the drawctxt mutex before taking the device mutex to
		 * preserve lock ordering (device mutex outranks drawctxt
		 * mutex elsewhere in the driver).
		 */
		mutex_unlock(&drawctxt->mutex);

		kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
		kgsl_cancel_events_timestamp(device, &context->events,
			cmdbatch->timestamp);
		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

		kgsl_cmdbatch_destroy(cmdbatch);
		mutex_lock(&drawctxt->mutex);
	}

	mutex_unlock(&drawctxt->mutex);

	/* Give the bad news to everybody waiting around */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}
/*
 * _check_context_timestamp() - wait-condition helper for context waits
 * @device: KGSL device the context belongs to
 * @drawctxt: context being waited on
 * @timestamp: timestamp the caller is waiting for
 *
 * Returns non-zero when the wait should end: either the timestamp has
 * expired, or the context was detached/invalidated while waiting.
 */
static int _check_context_timestamp(struct kgsl_device *device,
		struct adreno_context *drawctxt, unsigned int timestamp)
{
	int expired;

	/* Bail if the drawctxt has been invalidated or destroyed */
	if (kgsl_context_detached(&drawctxt->base))
		return 1;
	if (drawctxt->state != ADRENO_CONTEXT_STATE_ACTIVE)
		return 1;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	expired = kgsl_check_timestamp(device, &drawctxt->base, timestamp);
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return expired;
}
/*
 * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @stat: filled in with busy/total time and current frequency
 *
 * This function expects the device mutex to be unlocked.  Accumulated power
 * statistics are reported and then reset for the next interval.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/* make sure we don't turn on clocks just to read stats */
	if (device->state == KGSL_STATE_ACTIVE) {
		struct kgsl_power_stats extra;
		device->ftbl->power_stats(device, &extra);
		device->pwrscale.accum_stats.busy_time += extra.busy_time;
		device->pwrscale.accum_stats.ram_time += extra.ram_time;
		device->pwrscale.accum_stats.ram_wait += extra.ram_wait;
	}

	tmp = ktime_to_us(ktime_get());
	/* total_time is the wall time since the previous query */
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	/* Export the bus statistics when the caller supplied an xstats */
	if (stat->private_data) {
		struct xstats *b = (struct xstats *)stat->private_data;
		b->ram_time = device->pwrscale.accum_stats.ram_time;
		b->ram_wait = device->pwrscale.accum_stats.ram_wait;
		b->mod = device->pwrctrl.bus_mod;
	}

	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);

	/* Reset the accumulators for the next interval */
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @stat: filled in with busy/total time and current frequency
 *
 * This function expects the device mutex to be unlocked.  Accumulated power
 * statistics are reported and then reset for the next interval.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;

	/* Clear all fields so anything we don't fill in reads as zero */
	memset(stat, 0, sizeof(*stat));

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/*
	 * If the GPU clock is on grab the latest power counter
	 * values.  Otherwise the most recent ACTIVE values will
	 * already be stored in accum_stats.
	 */
	kgsl_pwrscale_update_stats(device);

	tmp = ktime_to_us(ktime_get());
	/* total_time is the wall time since the previous query */
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);

	/* Reset the accumulators for the next interval */
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @freq: in: requested frequency; out: frequency actually selected
 * @flags: DEVFREQ_FLAG_* hints from the governor
 *
 * This function expects the device mutex to be unlocked.  Selects the power
 * level matching *freq, or - when the frequency is unchanged - adjusts the
 * bus vote according to the FAST/SLOW hints.  An active, unexpired power
 * constraint can veto a downward level change.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/*
		 * Scan from min_pwrlevel down to max_pwrlevel for the first
		 * level that can satisfy the request; default to max if none
		 * matches.  (Assumes min_pwrlevel >= max_pwrlevel as indices
		 * - NOTE(review): confirm pwrlevels[] ordering.)
		 */
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		/*
		 * Signal for faster or slower bus.  If KGSL isn't already
		 * running at the desired speed for the given level, modify
		 * its vote.
		 */
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
			(pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
				0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
			(pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
				0 : SLOW_BUS;
		/* Only re-vote the bus when the modifier actually changed */
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	/*
	 * The power constraints need an entire interval to do their magic, so
	 * skip changing the powerlevel if the time hasn't expired yet and the
	 * new level is less than the constraint
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
		(!time_after(jiffies, pwr->constraint.expires)) &&
		(level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		/* Change the power level */
		kgsl_pwrctrl_pwrlevel_change(device, level);

		/*Invalidate the constraint set */
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;

		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
/**
 * adreno_drawctxt_wait() - sleep until a timestamp expires
 * @adreno_dev: pointer to the adreno_device struct
 * @context: Pointer to the KGSL context to sleep for
 * @timestamp: Timestamp to wait on
 * @timeout: Number of milliseconds to wait (0 for infinite)
 *
 * Register an event to wait for a timestamp on a context and sleep until it
 * has past.  Must be called with the device mutex held; the mutex is dropped
 * while sleeping and re-taken before returning.  Returns < 0 on error,
 * -ETIMEDOUT if the timeout expires, -EDEADLK if the context was
 * invalidated, -EINVAL if it was detached, or 0 on success.
 */
int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	static unsigned int io_cnt;
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret, io;

	if (kgsl_context_detached(context))
		return -EINVAL;

	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
		return -EDEADLK;

	/* Needs to hold the device mutex */
	BUG_ON(!mutex_is_locked(&device->mutex));

	trace_adreno_drawctxt_wait_start(context->id, timestamp);

	ret = kgsl_add_event(device, &context->events, timestamp,
		wait_callback, (void *) drawctxt);
	if (ret)
		goto done;

	/*
	 * For proper power accounting sometimes we need to call
	 * io_wait_interruptible_timeout and sometimes we need to call
	 * plain old wait_interruptible_timeout.  We call the regular
	 * timeout N times out of 100, where N is a number specified by
	 * the current power level
	 */

	io_cnt = (io_cnt + 1) % 100;
	io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
		? 0 : 1;

	/* Drop the device mutex while sleeping so the GPU can make progress */
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	if (timeout) {
		long ret_temp;
		/*
		 * Timed wait returns 0 on timeout and >0 when the condition
		 * became true - normalize to -ETIMEDOUT / 0, and pass
		 * through negative values (e.g. interrupted by a signal).
		 */
		ret_temp = adreno_wait_event_interruptible_timeout(
			drawctxt->waiting,
			_check_context_timestamp(device, drawctxt, timestamp),
			msecs_to_jiffies(timeout), io);

		if (ret_temp == 0)
			ret = -ETIMEDOUT;
		else if (ret_temp > 0)
			ret = 0;
		else
			ret = (int) ret_temp;
	} else {
		ret = (int) adreno_wait_event_interruptible(drawctxt->waiting,
			_check_context_timestamp(device, drawctxt, timestamp),
			io);
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	/* -EDEADLK if the context was invalidated while we were waiting */
	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
		ret = -EDEADLK;

	/* Return -EINVAL if the context was detached while we were waiting */
	if (kgsl_context_detached(context))
		ret = -EINVAL;

done:
	trace_adreno_drawctxt_wait_done(context->id, timestamp, ret);
	return ret;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: devfreq device handle (drvdata is the struct kgsl_device)
 * @freq: in: requested frequency; out: frequency actually selected
 * @flags: DEVFREQ_FLAG_* hints from the governor
 *
 * This function expects the device mutex to be unlocked.  Selects the power
 * level matching *freq; an active, unexpired power constraint can veto a
 * downward level change.  DEVFREQ_FLAG_WAKEUP_MAXFREQ defers the request to
 * the next wakeup instead.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/*
		 * Scan from min_pwrlevel down to max_pwrlevel for the first
		 * level that can satisfy the request; default to max if none
		 * matches.  (Assumes min_pwrlevel >= max_pwrlevel as indices
		 * - NOTE(review): confirm pwrlevels[] ordering.)
		 */
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	}

	/*
	 * The power constraints need an entire interval to do their magic, so
	 * skip changing the powerlevel if the time hasn't expired yet and the
	 * new level is less than the constraint
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
		(!time_after(jiffies, pwr->constraint.expires)) &&
		(level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		/* Change the power level */
		kgsl_pwrctrl_pwrlevel_change(device, level);

		/* Invalidate the constraint set */
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;

		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
/*
 * kgsl_add_fence_event() - create a sync fence tied to a context timestamp
 * @device: KGSL device the context belongs to
 * @context_id: id of the context whose timeline the fence is created on
 * @timestamp: timestamp that signals the fence when it retires
 * @data: user pointer that receives struct kgsl_timestamp_event_fence
 * @len: size of the user buffer; must equal sizeof(priv)
 * @owner: device-private handle used to validate context ownership
 *
 * Creates a sync point and fence on the context timeline, publishes the fd
 * to userspace via copy_to_user(), registers a retire event that releases
 * the context reference, and finally installs the fd.  On any failure the
 * fd, fence, context reference and event allocation are all unwound.
 */
int kgsl_add_fence_event(struct kgsl_device *device,
	u32 context_id, u32 timestamp, void __user *data, int len,
	struct kgsl_device_private *owner)
{
	struct kgsl_fence_event_priv *event;
	struct kgsl_timestamp_event_fence priv;
	struct kgsl_context *context;
	struct sync_pt *pt;
	struct sync_fence *fence = NULL;
	int ret = -EINVAL;
	char fence_name[sizeof(fence->name)] = {};

	/* -1 marks "no fd allocated yet" for the cleanup path */
	priv.fence_fd = -1;

	if (len != sizeof(priv))
		return -EINVAL;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	/* Takes a context reference; released by the event callback */
	context = kgsl_context_get_owner(owner, context_id);

	if (context == NULL)
		goto unlock;

	event->context = context;
	event->timestamp = timestamp;

	pt = kgsl_sync_pt_create(context->timeline, timestamp);
	if (pt == NULL) {
		KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
		ret = -ENOMEM;
		goto unlock;
	}
	snprintf(fence_name, sizeof(fence_name),
		"%s-pid-%d-ctx-%d-ts-%d",
		device->name, current->group_leader->pid,
		context_id, timestamp);

	fence = sync_fence_create(fence_name, pt);
	if (fence == NULL) {
		/* only destroy pt when not added to fence */
		kgsl_sync_pt_destroy(pt);
		KGSL_DRV_ERR(device, "sync_fence_create failed\n");
		ret = -ENOMEM;
		goto unlock;
	}

	priv.fence_fd = get_unused_fd_flags(0);
	if (priv.fence_fd < 0) {
		KGSL_DRV_ERR(device, "Unable to get a file descriptor: %d\n",
			priv.fence_fd);
		ret = priv.fence_fd;
		goto unlock;
	}

	/* Unlock the mutex before copying to user */
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	if (copy_to_user(data, &priv, sizeof(priv))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Hold the context ref-count for the event - it will get released in
	 * the callback
	 */
	ret = kgsl_add_event(device, &context->events, timestamp,
		kgsl_fence_event_cb, event);

	if (ret)
		goto out;

	sync_fence_install(fence, priv.fence_fd);

	return 0;

unlock:
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

out:
	/* The fd was never installed, so release the reservation */
	if (priv.fence_fd >= 0)
		put_unused_fd(priv.fence_fd);

	/* Dropping the fence also destroys its sync point */
	if (fence)
		sync_fence_put(fence);

	/* kgsl_context_put() tolerates NULL - TODO confirm */
	kgsl_context_put(context);
	kfree(event);
	return ret;
}
/*
 * kgsl_add_fence_event() - create a sync fence tied to a context timestamp
 * @device: KGSL device the context belongs to
 * @context_id: id of the context whose timeline the fence is created on
 * @timestamp: timestamp that signals the fence when it retires
 * @data: user pointer that receives struct kgsl_timestamp_event_fence
 * @len: size of the user buffer; must equal sizeof(priv)
 * @owner: device-private handle used to validate context ownership
 *
 * Creates a sync point and fence on the context timeline.  If the timestamp
 * has already retired, the timeline is signalled directly; otherwise an
 * event is queued via _add_fence_event().  The fd is installed only after
 * every fallible step has succeeded, so the error paths never release an fd
 * that userspace already owns.
 */
int kgsl_add_fence_event(struct kgsl_device *device,
	u32 context_id, u32 timestamp, void __user *data, int len,
	struct kgsl_device_private *owner)
{
	struct kgsl_timestamp_event_fence priv;
	struct kgsl_context *context;
	struct sync_pt *pt;
	struct sync_fence *fence = NULL;
	int ret = -EINVAL;
	char fence_name[sizeof(fence->name)] = {};
	unsigned int cur;

	/* -1 marks "no fd allocated yet" for the cleanup path */
	priv.fence_fd = -1;

	if (len != sizeof(priv))
		return -EINVAL;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	context = kgsl_context_get_owner(owner, context_id);

	if (context == NULL)
		goto unlock;

	pt = kgsl_sync_pt_create(context->timeline, context, timestamp);
	if (pt == NULL) {
		KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
		ret = -ENOMEM;
		goto unlock;
	}
	snprintf(fence_name, sizeof(fence_name),
		"%s-pid-%d-ctx-%d-ts-%d",
		device->name, current->group_leader->pid,
		context_id, timestamp);

	fence = sync_fence_create(fence_name, pt);
	if (fence == NULL) {
		/* only destroy pt when not added to fence */
		kgsl_sync_pt_destroy(pt);
		KGSL_DRV_ERR(device, "sync_fence_create failed\n");
		ret = -ENOMEM;
		goto unlock;
	}

	priv.fence_fd = get_unused_fd_flags(0);
	if (priv.fence_fd < 0) {
		KGSL_DRV_ERR(device, "Unable to get a file descriptor: %d\n",
			priv.fence_fd);
		ret = priv.fence_fd;
		goto unlock;
	}

	/*
	 * If the timestamp hasn't expired yet create an event to trigger it.
	 * Otherwise, just signal the fence - there is no reason to go through
	 * the effort of creating a fence we don't need.
	 */
	cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);

	if (timestamp_cmp(cur, timestamp) >= 0)
		kgsl_sync_timeline_signal(context->timeline, cur);
	else {
		ret = _add_fence_event(device, context, timestamp);
		if (ret)
			goto unlock;
	}

	kgsl_context_put(context);
	/*
	 * The cleanup path below also puts the context; clear the pointer so
	 * a copy_to_user() failure cannot drop the reference a second time.
	 */
	context = NULL;

	/* Unlock the mutex before copying to user */
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	if (copy_to_user(data, &priv, sizeof(priv))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Install the fd only after nothing else can fail: once installed,
	 * the fd table owns the fence, and the put_unused_fd() /
	 * sync_fence_put() below would race with userspace closing the fd.
	 */
	sync_fence_install(fence, priv.fence_fd);

	return 0;

unlock:
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

out:
	/* The fd was never installed, so release the reservation */
	if (priv.fence_fd >= 0)
		put_unused_fd(priv.fence_fd);

	/* Dropping the fence also destroys its sync point */
	if (fence)
		sync_fence_put(fence);

	kgsl_context_put(context);
	return ret;
}