/*
 * trinity_dpm_config - turn the SMU's DPM handling on or off.
 * @rdev: radeon device
 * @enable: true to enable DPM, false to disable it
 *
 * Writes the on/off flag into SMU scratch register 0 and then notifies
 * the SMU with the DPM_Config message.  Returns the result of the SMU
 * notification.
 */
int trinity_dpm_config(struct radeon_device *rdev, bool enable)
{
	WREG32_SMC(SMU_SCRATCH0, enable ? 1 : 0);

	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Config);
}
/*
 * vi_set_uvd_clock - program one UVD clock via its control/status registers.
 * @adev: amdgpu device
 * @clock: target clock frequency (units per atombios convention — 10 kHz)
 * @cntl_reg: SMC index of the clock control register
 * @status_reg: SMC index of the clock status register
 *
 * Fetches divider settings from atombios for @clock, programs the divider
 * into @cntl_reg, then polls @status_reg until the clock reports stable.
 *
 * Returns 0 on success, the atombios error on divider lookup failure, or
 * -ETIMEDOUT if the clock does not settle within ~1 second.
 *
 * Fix: the original text read "÷rs" — a mojibake of "&dividers"
 * (the "&divide" prefix was collapsed into the '÷' character), which does
 * not compile.  Restored the address-of argument.
 */
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	/* Replace the divider field, preserving the other control bits. */
	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	/* Poll up to 100 * 10ms for the clock-stable status bit. */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
/*
 * ci_reset_smc - hold the SMC in reset.
 * @rdev: radeon device
 *
 * Read-modify-write of SMC_SYSCON_RESET_CNTL to assert RST_REG.
 */
void ci_reset_smc(struct radeon_device *rdev)
{
	WREG32_SMC(SMC_SYSCON_RESET_CNTL,
		   RREG32_SMC(SMC_SYSCON_RESET_CNTL) | RST_REG);
}
/*
 * amdgpu_si_start_smc - release the SMC from reset.
 * @adev: amdgpu device
 *
 * Clears RST_REG in SMC_SYSCON_RESET_CNTL so the SMC starts running.
 */
void amdgpu_si_start_smc(struct amdgpu_device *adev)
{
	u32 reset_cntl = RREG32_SMC(SMC_SYSCON_RESET_CNTL);

	reset_cntl &= ~RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, reset_cntl);
}
/*
 * si_start_smc_clock - ungate the SMC clock.
 * @rdev: radeon device
 *
 * Clears CK_DISABLE in SMC_SYSCON_CLOCK_CNTL_0 so the SMC clock runs.
 */
void si_start_smc_clock(struct radeon_device *rdev)
{
	u32 clk_cntl = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	clk_cntl &= ~CK_DISABLE;
	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, clk_cntl);
}
/*
 * si_start_smc - release the SMC from reset.
 * @rdev: radeon device
 *
 * Read-modify-write of SMC_SYSCON_RESET_CNTL to deassert RST_REG.
 */
void si_start_smc(struct radeon_device *rdev)
{
	WREG32_SMC(SMC_SYSCON_RESET_CNTL,
		   RREG32_SMC(SMC_SYSCON_RESET_CNTL) & ~RST_REG);
}
/*
 * vi_read_disabled_bios - read the VBIOS while the ROM is normally disabled.
 * @adev: amdgpu device
 *
 * Saves the current BUS_CNTL, VGA and ROM_CNTL register state, temporarily
 * enables the ROM (and disables VGA mode when CRTCs are present), performs
 * the BIOS read via amdgpu_read_bios(), then restores every saved register
 * in reverse.  The save/modify/read/restore ordering is deliberate — do not
 * reorder these register accesses.
 *
 * Returns the result of amdgpu_read_bios() (true on success).
 */
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	/* Save current state so it can be restored after the read. */
	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);
	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	/* NOTE(review): SCK_OVERWRITE presumably forces the ROM serial clock
	 * on for the read — confirm against the register spec. */
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
/*
 * amdgpu_si_smc_clock - gate or ungate the SMC clock.
 * @adev: amdgpu device
 * @enable: true to run the SMC clock, false to stop it
 *
 * Toggles CK_DISABLE in SMC_SYSCON_CLOCK_CNTL_0 (clear to enable,
 * set to disable).
 */
void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
{
	u32 clk_cntl = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	clk_cntl = enable ? (clk_cntl & ~CK_DISABLE) : (clk_cntl | CK_DISABLE);
	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, clk_cntl);
}
/*
 * vce_v2_0_set_bypass_mode - toggle the ECLK DFS bypass for VCE.
 * @adev: amdgpu device
 * @enable: true to set the ECLK bypass bit, false to clear it
 *
 * Read-modify-write of GCK_DFS_BYPASS_CNTL, touching only BYPASSECLK.
 */
static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 bypass_cntl = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	bypass_cntl = enable ?
		(bypass_cntl | GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK) :
		(bypass_cntl & ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK);
	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, bypass_cntl);
}
/*
 * uvd_v6_set_bypass_mode - toggle the DCLK/VCLK DFS bypass for UVD.
 * @adev: amdgpu device
 * @enable: true to set both bypass bits, false to clear them
 *
 * Read-modify-write of GCK_DFS_BYPASS_CNTL, touching only the
 * BYPASSDCLK and BYPASSVCLK bits.
 */
static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	const u32 mask = GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK;
	u32 bypass_cntl = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		bypass_cntl |= mask;
	else
		bypass_cntl &= ~mask;
	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, bypass_cntl);
}
/*
 * amdgpu_si_reset_smc - put the SMC into reset.
 * @adev: amdgpu device
 *
 * Performs four reads of CB_CGTT_SCLK_CTRL before asserting RST_REG in
 * SMC_SYSCON_RESET_CNTL.  NOTE(review): the dummy reads presumably flush
 * pending register traffic before the reset — confirm against the
 * hardware programming guide.
 */
void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < 4; i++)
		RREG32(CB_CGTT_SCLK_CTRL);

	WREG32_SMC(SMC_SYSCON_RESET_CNTL,
		   RREG32_SMC(SMC_SYSCON_RESET_CNTL) | RST_REG);
}
/*
 * si_reset_smc - put the SMC into reset.
 * @rdev: radeon device
 *
 * Performs four reads of CB_CGTT_SCLK_CTRL before asserting RST_REG in
 * SMC_SYSCON_RESET_CNTL.  NOTE(review): the dummy reads presumably flush
 * pending register traffic before the reset — confirm against the
 * hardware programming guide.
 */
void si_reset_smc(struct radeon_device *rdev)
{
	u32 reset_cntl;

	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);

	reset_cntl = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, reset_cntl);
}
/*
 * vi_set_vce_clocks - program the VCE ECLK.
 * @adev: amdgpu device
 * @evclk: requested EVCLK (currently unused by this function; kept for
 *         interface compatibility with callers)
 * @ecclk: requested ECCLK (units per atombios convention — 10 kHz)
 *
 * Fetches divider settings from atombios for @ecclk, waits for the ECLK
 * status bit to report stable, programs the divider, then waits for the
 * clock to settle again.
 *
 * Returns 0 on success, the atombios error on divider lookup failure, or
 * -ETIMEDOUT if the clock does not settle within ~1 second.
 *
 * Fix: the original text read "÷rs" — a mojibake of "&dividers"
 * (the "&divide" prefix was collapsed into the '÷' character), which does
 * not compile.  Restored the address-of argument.
 */
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	/* Wait for the clock to be stable before touching the divider. */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* Replace the divider field, preserving the other control bits. */
	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	/* Poll up to 100 * 10ms for the clock to settle at the new rate. */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
/*
 * trinity_dpm_n_levels_disabled - pass a level-disable count to the SMU.
 * @rdev: radeon device
 * @n: value written to SMU scratch register 0
 *
 * Writes @n into SMU_SCRATCH0 and sends the DPM_N_LevelsDisabled message.
 * Returns the result of the SMU notification.
 */
int trinity_dpm_n_levels_disabled(struct radeon_device *rdev, u32 n)
{
	WREG32_SMC(SMU_SCRATCH0, n);

	return trinity_notify_message_to_smu(rdev,
					     PPSMC_MSG_DPM_N_LevelsDisabled);
}
/*
 * trinity_dpm_force_state - ask the SMU to force a DPM state.
 * @rdev: radeon device
 * @n: state index written to SMU scratch register 0
 *
 * Writes @n into SMU_SCRATCH0 and sends the DPM_ForceState message.
 * Returns the result of the SMU notification.
 */
int trinity_dpm_force_state(struct radeon_device *rdev, u32 n)
{
	WREG32_SMC(SMU_SCRATCH0, n);

	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_ForceState);
}