int vce_v1_0_init(struct radeon_device *rdev) { struct radeon_ring *ring; int r; r = vce_v1_0_start(rdev); if (r) return r; ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; ring->ready = true; r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring); if (r) { ring->ready = false; return r; } ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; ring->ready = true; r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring); if (r) { ring->ready = false; return r; } DRM_INFO("VCE initialized successfully.\n"); return 0; }
/** * uvd_v1_0_init - start and test UVD block * * @rdev: radeon_device pointer * * Initialize the hardware, boot up the VCPU and do some testing */ int uvd_v1_0_init(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; uint32_t tmp; int r; /* raise clocks while booting up the VCPU */ if (rdev->family < CHIP_RV740) radeon_set_uvd_clocks(rdev, 10000, 10000); else radeon_set_uvd_clocks(rdev, 53300, 40000); r = uvd_v1_0_start(rdev); if (r) goto done; ring->ready = true; r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring); if (r) { ring->ready = false; goto done; } r = radeon_ring_lock(rdev, ring, 10); if (r) { DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r); goto done; } tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); radeon_ring_write(ring, tmp); radeon_ring_write(ring, 0xFFFFF); tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); radeon_ring_write(ring, tmp); radeon_ring_write(ring, 0xFFFFF); tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); radeon_ring_write(ring, tmp); radeon_ring_write(ring, 0xFFFFF); /* Clear timeout status bits */ radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0)); radeon_ring_write(ring, 0x8); radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); radeon_ring_write(ring, 3); radeon_ring_unlock_commit(rdev, ring); done: /* lower clocks again */ radeon_set_uvd_clocks(rdev, 0, 0); if (!r) DRM_INFO("UVD initialized successfully.\n"); return r; }
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	/* program both SDMA engines: i==0 -> SDMA0/DMA ring, i==1 -> SDMA1/DMA1 ring */
	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		/* zero the semaphore timer control registers for this engine */
		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		/* writeback-enable is OR'd into rb_cntl here; it reaches the
		 * hardware in the final RB_CNTL write below (with SDMA_RB_ENABLE)
		 */
		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		/* wptr register is in bytes, ring->wptr counts dwords */
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	/* expose full VRAM if one of the DMA rings handles buffer copies */
	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	/* Reset dma */
	if (rdev->family >= CHIP_RV770)
		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
	else
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
	/* read back to post the write, then hold reset briefly */
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	/* zero the semaphore timer control registers */
	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = order_base_2(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	/* writeback-enable lands in the hardware via the final RB_CNTL write */
	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

	/* clear the CTXEMPTY interrupt enable bit */
	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	/* wptr register is in bytes, ring->wptr counts dwords */
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	/* expose full VRAM if the DMA ring handles buffer copies */
	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
/**
 * cayman_cp_resume - reset and restart the CP rings
 *
 * @rdev: radeon_device pointer
 *
 * Soft-resets the CP (and the PA/SH/VGT/SPI/SX blocks that must be reset
 * with it), programs ring buffers 0 (gfx+compute), 1 and 2 (compute only),
 * then starts the CP and ring-tests ring 0 only.
 * Returns 0 on success, the radeon_ring_test() error otherwise.
 */
int cayman_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	/* read back to post the write, hold reset, then release it */
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* zero the semaphore timer registers */
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR,
	       (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* without writeback, disable scratch updates and rptr writeback */
	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	/* final CNTL write drops RB_RPTR_WR_ENA (and may carry RB_NO_UPDATE) */
	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR,
	       (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR,
	       (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	/* compute rings stay not-ready: only cp0 is tested below */
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX,
			     &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}