static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
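A caller would typically invoke vi_set_uvd_clock() once per UVD clock domain, pointing it at the matching control/status register pair. A minimal sketch of such a wrapper follows; the ixCG_VCLK_*/ixCG_DCLK_* register index names are assumptions based on the naming pattern in this listing, not guaranteed by it.

/* Sketch: program both UVD clocks via vi_set_uvd_clock().
 * The ixCG_VCLK_* / ixCG_DCLK_* index names are assumed.
 */
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	return vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
}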
static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
{
	u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	if (!(rst & RST_REG) && !(clk & CK_DISABLE))
		return true;

	return false;
}
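Firmware-load and DPM paths typically use this check as a guard before touching SMC state. A hypothetical caller might look like the sketch below; the function name, comment, and error code are illustrative only.

/* Hypothetical guard: refuse to (re)load SMC firmware while the SMC is
 * already running. Name and error code are illustrative, not from this listing.
 */
static int example_load_smc_firmware(struct amdgpu_device *adev)
{
	if (amdgpu_si_is_smc_running(adev))
		return -EBUSY;

	/* ... copy the SMC ucode into SMC RAM here ... */
	return 0;
}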
bool ci_is_smc_running(struct radeon_device *rdev)
{
	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
	u32 pc_c = RREG32_SMC(SMC_PC_C);

	if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
		return true;

	return false;
}
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
void ci_reset_smc(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);

	tmp |= RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
void si_start_smc_clock(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	tmp &= ~CK_DISABLE;
	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
}
void si_start_smc(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);

	tmp &= ~RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
void amdgpu_si_start_smc(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);

	tmp &= ~RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}
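vi_get_xclk() is normally not called directly but exposed through the ASIC callback table and queried via the callback pointer. The sketch below assumes the usual amdgpu_asic_funcs callback pattern; the table contents shown here are illustrative.

/* Sketch: exposing vi_get_xclk() through the ASIC callback table.
 * The surrounding table contents are assumptions based on the usual
 * amdgpu callback pattern, not taken from this listing.
 */
static const struct amdgpu_asic_funcs vi_asic_funcs = {
	.get_xclk = &vi_get_xclk,
	/* ... other ASIC callbacks ... */
};

/* Callers then query the reference clock through the table, e.g.:
 *	u32 xclk = adev->asic_funcs->get_xclk(adev);
 */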
int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
{
	int ret;

	ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
	if (ret == 0)
		*enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);

	return ret;
}
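The value read back from the SMU message argument register is a bitmap of enabled SCLK DPM levels; a typical consumer scans it for the lowest or highest set bit. A minimal, hypothetical sketch:

/* Hypothetical use of the enable mask: find the lowest enabled SCLK DPM
 * level. The function name and the 32-level bound are illustrative.
 */
static int example_lowest_enabled_level(struct amdgpu_device *adev, u32 *level)
{
	u32 enable_mask, i;
	int ret;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < 32; i++) {
		if (enable_mask & (1u << i)) {
			*level = i;
			return 0;
		}
	}

	return -EINVAL;
}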
void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	if (enable)
		tmp &= ~CK_DISABLE;
	else
		tmp |= CK_DISABLE;

	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
}
static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
{
	int ret;

	ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
	if (ret == 0)
		*enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);

	return ret;
}
static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
void si_reset_smc(struct radeon_device *rdev)
{
	u32 tmp;

	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);

	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
	tmp |= RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
	u32 tmp;

	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);

	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_si_is_smc_running(adev))
		return PPSMC_Result_OK;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
		if ((tmp & CKEN) == 0)
			break;
		udelay(1);
	}

	return PPSMC_Result_OK;
}
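The wait, reset, and clock-gate helpers above are normally combined when shutting the SMC down. A purely illustrative sequence, built only from functions already shown in this listing (the wrapper name and the exact ordering are assumptions):

/* Illustrative shutdown sequence composed from the helpers in this listing;
 * the wrapper name and ordering are hypothetical.
 */
static void example_stop_smc(struct amdgpu_device *adev)
{
	amdgpu_si_wait_for_smc_inactive(adev);	/* let in-flight work drain */
	amdgpu_si_reset_smc(adev);		/* assert RST_REG */
	amdgpu_si_smc_clock(adev, false);	/* gate the SMC clock */
}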
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);

	return r;
}
static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}