Example no. 1
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	uint32_t reg;

	if (vm_id < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, pd_addr >> 12);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}
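Examples 1, 6, and 9 all use the same GPCOM mailbox convention: DATA0 carries a register byte offset (the dword offset shifted left by 2), DATA1 carries a value, and the CMD register selects the operation; from context, 0x8 triggers an indirect register write and 0xC a masked wait, with the mask taken from GP_SCRATCH8. A minimal sketch of that pattern factored into two helpers; the function names are hypothetical, but the packet sequences mirror uvd_v6_0_ring_emit_vm_flush() above.

/* Sketch (assumption): the write/wait mailbox pattern factored out. */
static void uvd_emit_reg_write(struct amdgpu_ring *ring,
			       uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);	/* register byte offset */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);		/* write command */
}

static void uvd_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);		/* wait until (reg & mask) == val */
}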
Example no. 2
/**
 * uvd_v1_0_init - start and test UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
int uvd_v1_0_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	if (rdev->family < CHIP_RV740)
		radeon_set_uvd_clocks(rdev, 10000, 10000);
	else
		radeon_set_uvd_clocks(rdev, 53300, 40000);

	r = uvd_v1_0_start(rdev);
	if (r)
		goto done;

	ring->ready = true;
	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = radeon_ring_lock(rdev, ring, 10);
	if (r) {
		DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
	radeon_ring_write(ring, 0x8);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
	radeon_ring_write(ring, 3);

	radeon_ring_unlock_commit(rdev, ring);

done:
	/* lower clocks again */
	radeon_set_uvd_clocks(rdev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
Example no. 3
/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v5_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	/* lower clocks again */
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
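A note on the reservation size in Examples 2 and 3: the ten dwords requested from radeon_ring_lock()/amdgpu_ring_alloc() account exactly for the five PACKET0 register/value pairs written afterwards (three semaphore timeout limits, the timeout status clear, and UVD_SEMA_CNTL).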
Example no. 4
/**
 * uvd_v1_0_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
	radeon_ring_write(ring, ib->length_dw);
}
Example no. 5
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
Example no. 6
static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}
Example no. 7
/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
Example no. 8
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
Example no. 9
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}
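Compared with Example 1, the page-table programming and the invalidate request are now delegated to the common amdgpu_gmc_emit_flush_gpu_tlb() helper; only the masked wait for the VM_INVALIDATE_REQUEST bit to clear (the CMD 0xC sequence) is still emitted by hand.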
Example no. 10
/**
 * uvd_v1_0_semaphore_emit - emit semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 */
void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct radeon_semaphore *semaphore,
			     bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
	radeon_ring_write(ring, emit_wait ? 1 : 0);
}
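The semaphore address is split across two 20-bit register fields, so it must be 8-byte aligned and fit in 43 bits. A small sketch with hypothetical helper names, just to make the bit layout explicit:

/* Sketch (assumption): bit layout implied by the two address writes above. */
static inline uint32_t uvd_sema_addr_low(uint64_t addr)
{
	return (addr >> 3) & 0x000FFFFF;	/* address bits 3..22 */
}

static inline uint32_t uvd_sema_addr_high(uint64_t addr)
{
	return (addr >> 23) & 0x000FFFFF;	/* address bits 23..42 */
}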
Example no. 11
/**
 * uvd_v1_0_ring_test - register write test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully write to the context register
 */
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(UVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
Example no. 12
static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

	return r;
}
Example no. 13
/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
Example no. 14
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}
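Read with the mailbox convention from Example 1, this is a wait-on-memory rather than a wait-on-register: DATA0/DATA1 carry the 64-bit fence address, GP_SCRATCH8 the compare mask, GP_SCRATCH9 the expected sequence number, and CMD 0xE appears to stall the ring until the fence value at that address matches seq, which is exactly what a pipeline sync needs. The command code's meaning is inferred from context, not from documentation.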
Example no. 15
/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
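Examples 13 and 15 are two generations of the same test: this older variant logs success or failure itself and returns -EINVAL on a bad readback, while the newer one in Example 13 simply returns -ETIMEDOUT and leaves the reporting to its caller (amdgpu_ring_test_helper(), as used in Example 21).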
Example no. 16
void rv515_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  ISYNC_ANY2D_IDLE3D |
			  ISYNC_ANY3D_IDLE2D |
			  ISYNC_WAIT_IDLEGUI |
			  ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
Example no. 17
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
Example no. 18
static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}
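A UVD no-op costs two dwords (a PACKET0 header plus its payload), hence the halved loop count and the WARN_ON for an odd wptr or count. For contrast, the generic fallback pads with single-dword nops taken from ring->funcs->nop; reconstructed from memory, so treat it as a sketch:

void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	/* default padding: one nop dword per slot, no payload */
	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}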
Example no. 19
/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
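Following the same mailbox convention: the sequence number is first latched in UVD_CONTEXT_ID (the register the ring tests poll), then the group ending in CMD 0 appears to request the fence write of seq to the 40-bit address held in DATA0/DATA1, and the second group ending in CMD 2 appears to raise the trap interrupt mentioned in the comment. Both command codes are inferred from context.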
Example no. 20
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v5_0_ring_emit_hdp_flush */
		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
		14, /* uvd_v5_0_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v5_0_ring_test_ring,
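In .emit_frame_size the worst-case dwords per frame are summed: 2 for the HDP flush and 2 for the HDP invalidate (each a single PACKET0 register/value pair, cf. the v6 versions in Examples 23 and 24), plus 14 for the fence, matching the fourteen amdgpu_ring_write() calls in the v6 fence of Example 19. Likewise .emit_ib_size = 6 corresponds to the three register/value pairs of the IB emit, cf. Example 4.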
Example no. 21
/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}
Example no. 22
	int r;

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  ISYNC_ANY2D_IDLE3D |
			  ISYNC_ANY3D_IDLE2D |
			  ISYNC_WAIT_IDLEGUI |
			  ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
	radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
Example no. 23
/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}
Example no. 24
/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}
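Both HDP operations are single register writes, so the ring packets in Examples 23 and 24 have direct CPU-side equivalents; a sketch assuming the usual WREG32() accessor (which picks up the local adev) is in scope, with a hypothetical helper name:

/* Sketch (assumption): MMIO counterpart of the two ring packets above. */
static void uvd_hdp_flush_invalidate_mmio(struct amdgpu_device *adev)
{
	/* flush the HDP write cache out to memory */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
	/* a write to HDP_DEBUG0 invalidates the HDP read cache */
	WREG32(mmHDP_DEBUG0, 1);
}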