/**
 * radeon_ring_force_activity - add a nop packet to an idle ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Add a nop packet to the ring if it is otherwise idle, to force
 * activity so that lockup detection can see whether the rptr is
 * still advancing (all asics).
 */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}
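/*
 * A minimal sketch of how a lockup test might use the helper above: force a
 * nop onto an idle ring, then see whether the read pointer advances. The
 * radeon_lockup_probe() name and the msleep-based wait are illustrative
 * assumptions; the driver's real detection tracks activity timestamps
 * instead of sleeping.
 */
static bool radeon_lockup_probe(struct radeon_device *rdev,
				struct radeon_ring *ring)
{
	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);

	radeon_ring_force_activity(rdev, ring);
	msleep(10);	/* crude wait for the engine to fetch the nop */

	/* if the rptr did not move, the ring looks locked up */
	return radeon_ring_get_rptr(rdev, ring) == rptr;
}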
/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 * @vm: radeon_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	/* write the updated page directory base for this VM context */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm->id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* The full driver version also updates the SH_MEM_* registers and
	 * flushes the HDP cache (this is what extra_bits and the
	 * POLL_REG_MEM packet are for) before invalidating the TLB; that
	 * sequence is elided here.
	 */

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm->id);
}
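/*
 * For orientation, a hedged sketch of how a vm_flush hook like the one above
 * is reached: the core emits it through the per-ring asic callback table
 * while fencing a page-table update. The table layout mirrors the hdp_flush
 * dispatch used by radeon_ring_commit() later in this section, but treat it
 * as an assumption rather than a verbatim excerpt.
 */
static void radeon_vm_flush_sketch(struct radeon_device *rdev,
				   struct radeon_vm *vm, int ridx)
{
	/* lands in cik_dma_vm_flush() when ridx selects an sDMA ring on CIK */
	if (rdev->asic->ring[ridx]->vm_flush)
		rdev->asic->ring[ridx]->vm_flush(rdev, ridx, vm);
}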
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size: with align_mask = 15 and wptr = 13,
	 * count_dw_pad = 16 - 13 = 3 nops bring wptr to a 16-dword boundary.
	 * Note that an already aligned wptr gets a full extra fetch line of
	 * nops.
	 */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	/* make sure the nops are visible before the GPU sees the new wptr */
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	/* read back to flush the posted write */
	(void)RREG32(ring->wptr_reg);
}
/**
 * radeon_vce_fence_emit - add a fence command to the ring
 *
 * @rdev: radeon_device pointer
 * @fence: the fence
 *
 */
void radeon_vce_fence_emit(struct radeon_device *rdev,
			   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	/* the GPU address is 64 bits wide; a 32-bit local would truncate it
	 * and make the upper_32_bits() write below always zero */
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

	radeon_ring_write(ring, VCE_CMD_FENCE);
	radeon_ring_write(ring, addr);
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, VCE_CMD_TRAP);
	radeon_ring_write(ring, VCE_CMD_END);
}
/**
 * radeon_vce_fence_emit - add a fence command to the ring
 *
 * @rdev: radeon_device pointer
 * @fence: the fence
 *
 */
void radeon_vce_fence_emit(struct radeon_device *rdev,
			   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* the VCE firmware consumes the ring as little-endian dwords, so
	 * each write is byte-swapped on big-endian hosts */
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
	radeon_ring_write(ring, cpu_to_le32(addr));
	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
	radeon_ring_write(ring, cpu_to_le32(fence->seq));
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
}
/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* If we are emitting the HDP flush via the ring buffer, we need to
	 * do it before padding.
	 */
	if (rdev->asic->ring[ring->idx]->hdp_flush)
		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
	mb();
	/* If we are emitting the HDP flush via MMIO, we need to do it after
	 * all CPU writes to VRAM finished.
	 */
	if (rdev->asic->mmio_hdp_flush)
		rdev->asic->mmio_hdp_flush(rdev);
	radeon_ring_set_wptr(rdev, ring);
}
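/*
 * A minimal sketch of the usual emit pattern that ends in the commit above:
 * reserve space under the ring lock, write packets, then unlock-and-commit.
 * The 16-dword count is a placeholder, and the two-argument
 * radeon_ring_unlock_commit() matches radeon_ring_restore() below; later
 * kernels add a bool hdp_flush parameter, so treat the exact signature as
 * an assumption about the surrounding API.
 */
static int radeon_emit_nops_example(struct radeon_device *rdev,
				    struct radeon_ring *ring)
{
	int i, r;

	r = radeon_ring_lock(rdev, ring, 16);	/* reserve 16 dwords */
	if (r)
		return r;
	for (i = 0; i < 16; i++)
		radeon_ring_write(ring, ring->nop);
	radeon_ring_unlock_commit(rdev, ring);	/* pad, bump wptr, unlock */
	return 0;
}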
/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring);
	/* the buffer was handed over by the backup path; free it now that
	 * it has been replayed */
	kfree(data);
	return 0;
}
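/*
 * A hedged sketch of the backup/restore pairing around a GPU reset:
 * radeon_ring_backup() snapshots the unexecuted dwords into a kmalloc'ed
 * buffer, and radeon_ring_restore() replays and kfree()s it, which is why
 * no explicit free appears here. The radeon_asic_reset() call in the middle
 * stands in for whatever asic-level reset the driver actually performs.
 */
static int radeon_ring_reset_example(struct radeon_device *rdev,
				     struct radeon_ring *ring)
{
	uint32_t *data;
	unsigned size;

	size = radeon_ring_backup(rdev, ring, &data);	/* save pending commands */
	radeon_asic_reset(rdev);			/* assumed reset entry point */
	return radeon_ring_restore(rdev, ring, size, data);
}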