static int radeon_debugfs_fence_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; int i, j; for (i = 0; i < RADEON_NUM_RINGS; ++i) { if (!rdev->fence_drv[i].initialized) continue; radeon_fence_process(rdev, i); seq_printf(m, "--- ring %d ---\n", i); seq_printf(m, "Last signaled fence 0x%016llx\n", (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); seq_printf(m, "Last emitted 0x%016"PRIx64"\n", rdev->fence_drv[i].sync_seq[i]); for (j = 0; j < RADEON_NUM_RINGS; ++j) { if (i != j && rdev->fence_drv[j].initialized) seq_printf(m, "Last sync to ring %d 0x%016"PRIx64"\n", j, rdev->fence_drv[i].sync_seq[j]); } } return 0; }
int rs600_irq_process(struct radeon_device *rdev) { uint32_t status; uint32_t r500_disp_int; status = rs600_irq_ack(rdev, &r500_disp_int); if (!status && !r500_disp_int) { return IRQ_NONE; } while (status || r500_disp_int) { /* SW interrupt */ if (status & RADEON_SW_INT_TEST) { radeon_fence_process(rdev); } /* Vertical blank interrupts */ if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 0); } if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 1); } status = rs600_irq_ack(rdev, &r500_disp_int); } return IRQ_HANDLED; }
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check whether the last signaled fence sequence number on @ring has
 * reached @seq (all asics). If the cached value has not, poll the new
 * last sequence once via radeon_fence_process() and re-check.
 * Returns true if the fence has signaled (current fence value
 * is >= requested value), false otherwise. Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	return atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq;
}
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics):
 * the distance between the last emitted and the last signaled sequence
 * numbers. Used by the dynpm code to track ring activity.
 * Returns the number of emitted fences on the ring, capped at
 * 0x10000000 to avoid 32-bit wrap-around in the returned value.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t pending;

	/* Not protected by the ring lock when reading the last sequence,
	 * but a slightly stale fence count is acceptable here.
	 */
	radeon_fence_process(rdev, ring);
	pending = rdev->fence_drv[ring].sync_seq[ring] -
		  atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32bits warp around */
	if (pending > 0x10000000)
		pending = 0x10000000;

	return (unsigned)pending;
}
static bool radeon_fence_is_signaled(struct fence *f) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; unsigned ring = fence->ring; u64 seq = fence->seq; if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { return true; } // if (down_read_trylock(&rdev->exclusive_lock)) { radeon_fence_process(rdev, ring); // up_read(&rdev->exclusive_lock); if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { return true; } } return false; }