/**
 * sde_sync_put - release a reference on a sync fence
 * @fence: opaque fence pointer previously obtained from the sync framework;
 *         may be NULL, in which case this is a no-op.
 */
void sde_sync_put(void *fence)
{
	if (!fence)
		return;

	sync_fence_put(fence);
}
/**
 * Check if fence has been signaled.
 *
 * @param system Timeline system.
 * @param fence Timeline fence.
 * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
 */
static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
{
	int i;
	u32 tid = _mali_osk_get_tid();
	mali_bool ret = MALI_TRUE;
#if defined(CONFIG_SYNC)
	struct sync_fence *sync_fence = NULL;
#endif

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	/* Reentrant lock: keyed on the current thread id so nested acquisition
	 * by the same thread does not deadlock. */
	mali_spinlock_reentrant_wait(system->spinlock, tid);

	/* A timeline fence is signaled only when every point it holds has been
	 * released on its timeline. */
	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
		struct mali_timeline *timeline;
		mali_timeline_point point;

		point = fence->points[i];

		if (likely(MALI_TIMELINE_NO_POINT == point)) {
			/* Fence contains no point on this timeline. */
			continue;
		}

		timeline = system->timelines[i];
		MALI_DEBUG_ASSERT_POINTER(timeline);

		/* NOTE(review): an invalid point is only reported, not treated
		 * as an error -- execution falls through to the release check
		 * below.  Confirm this is the intended policy. */
		if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
			MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
		}

		if (!mali_timeline_is_point_released(timeline, point)) {
			/* At least one point is still pending: fence not signaled. */
			ret = MALI_FALSE;
			goto exit;
		}
	}

#if defined(CONFIG_SYNC)
	/* The fence may additionally reference an external Android sync fence
	 * via a file descriptor; that fence must be signaled too. */
	if (-1 != fence->sync_fd) {
		sync_fence = sync_fence_fdget(fence->sync_fd);
		if (likely(NULL != sync_fence)) {
			/* status moved into an atomic_t in kernel 3.17; 0 means
			 * the sync fence is still active (not yet signaled). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
			if (0 == sync_fence->status) {
#else
			if (0 == atomic_read(&sync_fence->status)) {
#endif
				ret = MALI_FALSE;
			}
		} else {
			/* Could not look up the fd; reported but treated as
			 * signaled (ret stays MALI_TRUE). */
			MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
		}
	}
#endif /* defined(CONFIG_SYNC) */

exit:
	mali_spinlock_reentrant_signal(system->spinlock, tid);

#if defined(CONFIG_SYNC)
	/* Drop the reference taken by sync_fence_fdget() above. */
	if (NULL != sync_fence) {
		sync_fence_put(sync_fence);
	}
#endif /* defined(CONFIG_SYNC) */

	return ret;
}

/**
 * Wait until the given timeline fence is signaled, or until the timeout.
 *
 * With MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY this is a non-blocking
 * poll; otherwise a wait tracker is added to the timeline system and the
 * caller sleeps on the system wait queue until the tracker is activated
 * (or the timeout expires).
 *
 * @param system  Timeline system.
 * @param fence   Timeline fence to wait on.
 * @param timeout Timeout, MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER, or
 *                MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY.
 * @return MALI_TRUE if the fence was signaled, MALI_FALSE on timeout or
 *         allocation failure.
 */
mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
{
	struct mali_timeline_fence_wait_tracker *wait;
	mali_timeline_point point;
	mali_bool ret;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));

	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
		/* Non-blocking poll. */
		return mali_timeline_fence_wait_check_status(system, fence);
	}

	wait = mali_timeline_fence_wait_tracker_alloc();
	if (unlikely(NULL == wait)) {
		MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
		return MALI_FALSE;
	}

	wait->activated = MALI_FALSE;
	wait->system = system;

	/* Initialize refcount to two references. The reference first will be released by this
	 * function after the wait is over. The second reference will be released when the tracker
	 * is activated. */
	_mali_osk_atomic_init(&wait->refcount, 2);

	/* Add tracker to timeline system, but not to a timeline. */
	mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);

	point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
	MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
	MALI_IGNORE(point);

	/* Wait for the tracker to be activated or time out.
	 */
	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
	} else {
		_mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
	}

	/* NOTE(review): wait->activated is read here without the system
	 * spinlock; presumably the wait-queue wakeup provides the needed
	 * ordering -- confirm. */
	ret = wait->activated;

	/* Drop this function's reference; free when the activation path has
	 * already dropped the other one. */
	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}

	return ret;
}

/**
 * Activate a fence wait tracker: mark it activated, wake any sleeping
 * waiter, release the tracker, and drop the activation reference.
 *
 * @param wait Fence wait tracker to activate.
 */
void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
{
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_ASSERT_POINTER(wait);
	MALI_DEBUG_ASSERT_POINTER(wait->system);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));

	MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
	wait->activated = MALI_TRUE;

	_mali_osk_wait_queue_wake_up(wait->system->wait_queue);

	/* Nothing can wait on this tracker, so nothing to schedule after release. */
	schedule_mask = mali_timeline_tracker_release(&wait->tracker);
	MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
	MALI_IGNORE(schedule_mask);

	/* Drop the activation reference (second of the two taken at init);
	 * free if the waiter has already dropped its reference. */
	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}
}
s32 vr_timeline_sync_fence_create(struct vr_timeline_system *system, struct vr_timeline_fence *fence) { u32 i; struct sync_fence *sync_fence_acc = NULL; VR_DEBUG_ASSERT_POINTER(system); VR_DEBUG_ASSERT_POINTER(fence); for (i = 0; i < VR_TIMELINE_MAX; ++i) { struct vr_timeline *timeline; struct sync_fence *sync_fence; if (VR_TIMELINE_NO_POINT == fence->points[i]) continue; timeline = system->timelines[i]; VR_DEBUG_ASSERT_POINTER(timeline); sync_fence = vr_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]); if (NULL == sync_fence) { /* temp test */ printk("[MALI] error(%d)\n", __LINE__); goto error; } if (NULL != sync_fence_acc) { /* Merge sync fences. */ sync_fence_acc = vr_sync_fence_merge(sync_fence_acc, sync_fence); if (NULL == sync_fence_acc) { /* temp test */ printk("[MALI] error(%d)\n", __LINE__); goto error; } } else { /* This was the first sync fence created. */ sync_fence_acc = sync_fence; } } if (-1 != fence->sync_fd) { struct sync_fence *sync_fence; sync_fence = sync_fence_fdget(fence->sync_fd); if (NULL == sync_fence) { /* temp test */ printk("[MALI] error(%d)\n", __LINE__); goto error; } if (NULL != sync_fence_acc) { sync_fence_acc = vr_sync_fence_merge(sync_fence_acc, sync_fence); if (NULL == sync_fence_acc) { /* temp test */ printk("[MALI] error(%d)\n", __LINE__); goto error; } } else { sync_fence_acc = sync_fence; } } if (NULL == sync_fence_acc) { VR_DEBUG_ASSERT_POINTER(system->signaled_sync_tl); /* There was nothing to wait on, so return an already signaled fence. */ sync_fence_acc = vr_sync_timeline_create_signaled_fence(system->signaled_sync_tl); if (NULL == sync_fence_acc) { /* temp test */ printk("[MALI] error(%d)\n", __LINE__); goto error; } } /* Return file descriptor for the accumulated sync fence. */ return vr_sync_fence_fd_alloc(sync_fence_acc); error: if (NULL != sync_fence_acc) { sync_fence_put(sync_fence_acc); } return -1; }
/**
 * kgsl_add_fence_event - create a sync fence fd that signals at a GPU timestamp
 * @device: KGSL device.
 * @context_id: Context whose timeline the fence point is created on.
 * @timestamp: Timestamp at which the fence signals.
 * @data: User pointer receiving a struct kgsl_timestamp_event_fence (fence_fd).
 * @len: Size of the user buffer; must equal sizeof(kgsl_timestamp_event_fence).
 * @owner: Device-private instance used to look up the context.
 *
 * Returns 0 on success or a negative error code.
 *
 * Fix: sync_fence_install() is now performed only after every fallible step
 * (copy_to_user, kgsl_add_event) has succeeded.  Previously a failure after
 * install still ran put_unused_fd()/sync_fence_put(), double-releasing the
 * fence reference already owned by the installed fd.
 */
int kgsl_add_fence_event(struct kgsl_device *device,
	u32 context_id, u32 timestamp, void __user *data, int len,
	struct kgsl_device_private *owner)
{
	struct kgsl_fence_event_priv *event;
	struct kgsl_timestamp_event_fence priv;
	struct kgsl_context *context;
	struct sync_pt *pt;
	struct sync_fence *fence = NULL;
	int ret = -EINVAL;
	char fence_name[sizeof(fence->name)] = {};

	priv.fence_fd = -1;

	if (len != sizeof(priv))
		return -EINVAL;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	context = kgsl_context_get_owner(owner, context_id);
	if (context == NULL)
		goto unlock;

	event->context = context;
	event->timestamp = timestamp;

	pt = kgsl_sync_pt_create(context->timeline, timestamp);
	if (pt == NULL) {
		KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
		ret = -ENOMEM;
		goto unlock;
	}
	snprintf(fence_name, sizeof(fence_name),
		"%s-pid-%d-ctx-%d-ts-%d",
		device->name, current->group_leader->pid,
		context_id, timestamp);

	fence = sync_fence_create(fence_name, pt);
	if (fence == NULL) {
		/* only destroy pt when not added to fence */
		kgsl_sync_pt_destroy(pt);
		KGSL_DRV_ERR(device, "sync_fence_create failed\n");
		ret = -ENOMEM;
		goto unlock;
	}

	priv.fence_fd = get_unused_fd_flags(0);
	if (priv.fence_fd < 0) {
		KGSL_DRV_ERR(device, "Unable to get a file descriptor: %d\n",
			priv.fence_fd);
		ret = priv.fence_fd;
		goto unlock;
	}

	/* Unlock the mutex before copying to user */
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	if (copy_to_user(data, &priv, sizeof(priv))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Hold the context ref-count for the event - it will get released in
	 * the callback
	 */
	ret = kgsl_add_event(device, &context->events, timestamp,
		kgsl_fence_event_cb, event);
	if (ret)
		goto out;

	/*
	 * Install the fence only after every fallible step has succeeded:
	 * once installed, the fd owns the fence reference, so the error
	 * cleanup below must never run past this point.
	 */
	sync_fence_install(fence, priv.fence_fd);

	return 0;

unlock:
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

out:
	if (priv.fence_fd >= 0)
		put_unused_fd(priv.fence_fd);

	if (fence)
		sync_fence_put(fence);

	kgsl_context_put(context);
	kfree(event);
	return ret;
}
/**
 * kgsl_add_fence_event - create a sync fence fd that signals at a GPU timestamp
 * @device: KGSL device.
 * @context_id: Context whose timeline the fence point is created on.
 * @timestamp: Timestamp at which the fence signals.
 * @data: User pointer receiving a struct kgsl_timestamp_event_fence (fence_fd).
 * @len: Size of the user buffer; must equal sizeof(kgsl_timestamp_event_fence).
 * @owner: Device-private instance used to look up the context and events.
 *
 * Returns 0 on success or a negative error code.
 *
 * Fixes: (1) sync_fence_install() is now deferred until every fallible step
 * has succeeded; previously the fail_event/fail_copy_fd paths ran
 * put_unused_fd()/sync_fence_put() on a fence already owned by the installed
 * fd, double-releasing it.  (2) A get_unused_fd_flags() failure now
 * propagates the real error code instead of a blanket -EINVAL.
 */
int kgsl_add_fence_event(struct kgsl_device *device,
	u32 context_id, u32 timestamp, void __user *data, int len,
	struct kgsl_device_private *owner)
{
	struct kgsl_fence_event_priv *event;
	struct kgsl_timestamp_event_fence priv;
	struct kgsl_context *context;
	struct sync_pt *pt;
	struct sync_fence *fence = NULL;
	int ret = -EINVAL;

	if (len != sizeof(priv))
		return -EINVAL;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	context = kgsl_context_get_owner(owner, context_id);
	if (context == NULL)
		goto fail_pt;

	event->context = context;
	event->timestamp = timestamp;

	pt = kgsl_sync_pt_create(context->timeline, timestamp);
	if (pt == NULL) {
		KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
		ret = -ENOMEM;
		goto fail_pt;
	}

	fence = sync_fence_create("kgsl-fence", pt);
	if (fence == NULL) {
		/* only destroy pt when not added to fence */
		kgsl_sync_pt_destroy(pt);
		KGSL_DRV_ERR(device, "sync_fence_create failed\n");
		ret = -ENOMEM;
		goto fail_fence;
	}

	priv.fence_fd = get_unused_fd_flags(0);
	if (priv.fence_fd < 0) {
		KGSL_DRV_ERR(device, "invalid fence fd\n");
		/* propagate the real error (e.g. -EMFILE), not -EINVAL */
		ret = priv.fence_fd;
		goto fail_fd;
	}

	if (copy_to_user(data, &priv, sizeof(priv))) {
		ret = -EFAULT;
		goto fail_copy_fd;
	}

	/*
	 * Hold the context ref-count for the event - it will get released in
	 * the callback
	 */
	ret = kgsl_add_event(device, context_id, timestamp,
		kgsl_fence_event_cb, event, owner);
	if (ret)
		goto fail_event;

	/*
	 * Install the fence only after every fallible step has succeeded:
	 * once installed, the fd owns the fence reference, so the error
	 * cleanup below must never run past this point.
	 */
	sync_fence_install(fence, priv.fence_fd);

	return 0;

fail_event:
fail_copy_fd:
	/* fd was reserved but never installed, so it is safe to drop */
	put_unused_fd(priv.fence_fd);
fail_fd:
	/* fence was never installed, so this releases the last reference */
	sync_fence_put(fence);
fail_fence:
fail_pt:
	kgsl_context_put(context);
	kfree(event);
	return ret;
}