/** * radeon_vce_idle_work_handler - power off VCE * * @work: pointer to work structure * * power of VCE when it's not used any more */ static void radeon_vce_idle_work_handler(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, vce.idle_work.work); if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) && (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) { if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { radeon_dpm_enable_vce(rdev, false); } else { radeon_set_vce_clocks(rdev, 0, 0); } } else { schedule_delayed_work(&rdev->vce.idle_work, msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); } }
/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: receives a kmalloc'ed copy of the unprocessed dwords
 *        (set to NULL when nothing was saved; caller owns the buffer)
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data)
{
	unsigned count, rptr, i;

	/* just in case lock the ring */
	mutex_lock(&rdev->ring_lock);
	*data = NULL;

	if (ring->ring_obj == NULL)
		goto out_unlock;

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!radeon_fence_count_emitted(rdev, ring->idx))
		goto out_unlock;

	/* figure out where the GPU will read next */
	if (ring->rptr_save_reg) {
		rptr = RREG32(ring->rptr_save_reg);
	} else if (rdev->wb.enabled) {
		rptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
	} else {
		/* no way to read back the next rptr */
		goto out_unlock;
	}

	/* dwords between the next rptr and wptr, modulo the ring size */
	count = (ring->wptr + (ring->ring_size / 4) - rptr) & ring->ptr_mask;
	if (count == 0)
		goto out_unlock;

	/* and then save the content of the ring */
	*data = kmalloc_array(count, sizeof(uint32_t), GFP_KERNEL);
	if (!*data)
		goto out_unlock;

	for (i = 0; i < count; ++i) {
		(*data)[i] = ring->ring[rptr++];
		rptr &= ring->ptr_mask;
	}

	mutex_unlock(&rdev->ring_lock);
	return count;

out_unlock:
	mutex_unlock(&rdev->ring_lock);
	return 0;
}