/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	/* Poll until the status bits masked by 0x38 read back as 0x28
	 * (powered on); 1 us between reads, 100 us total timeout */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	/*
	 * Propagate the poll result: the original returned 0 unconditionally,
	 * which let callers proceed as if SPTPRAC had powered on even after
	 * the timeout above fired.
	 */
	return ret;
}
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 status;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	/* Hand SPTPRAC power control back and request power-off */
	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	/* Poll for the power-off status bit; 100 us between reads, 10 ms
	 * total timeout */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
		status, (status & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
/*
 * Trigger an OOB (out of band) request to the GMU: set the request
 * interrupt bit, wait for the matching ack bit from the GMU, then clear
 * the ack. Returns 0 on success or the poll-timeout error.
 */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	/* Map the requested OOB state to its request/ack interrupt bits */
	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt (100 us poll, 10 ms timeout) */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) { u32 val; int local = gmu->idle_level; /* SPTP and IFPC both report as IFPC */ if (gmu->idle_level == GMU_IDLE_STATE_SPTP) local = GMU_IDLE_STATE_IFPC; val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); if (val == local) { if (gmu->idle_level != GMU_IDLE_STATE_IFPC || !a6xx_gmu_gx_is_on(gmu)) return true; } return false; }
static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) { gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, ((index << 24) & 0xff) | (3 & 0xf)); /* * Send an invalid index as a vote for the bus bandwidth and let the * firmware decide on the right vote */ gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); /* Set and clear the OOB for DCVS to trigger the GMU */ a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); }
static irqreturn_t a6xx_hfi_irq(int irq, void *data) { struct a6xx_gmu *gmu = data; u32 status; status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ) tasklet_schedule(&gmu->hfi_tasklet); if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); /* Temporary until we can recover safely */ BUG(); } return IRQ_HANDLED; }
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) { struct a6xx_gmu *gmu = &a6xx_gpu->gmu; int status, ret; if (WARN(!gmu->mmio, "The GMU is not set up yet\n")) return 0; /* Turn on the resources */ pm_runtime_get_sync(gmu->dev); /* Use a known rate to bring up the GMU */ clk_set_rate(gmu->core_clk, 200000000); ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); if (ret) goto out; a6xx_gmu_irq_enable(gmu); /* Check to see if we are doing a cold or warm boot */ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? GMU_WARM_BOOT : GMU_COLD_BOOT; ret = a6xx_gmu_fw_start(gmu, status); if (ret) goto out; ret = a6xx_hfi_start(gmu, status); /* Set the GPU to the highest power frequency */ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); out: /* Make sure to turn off the boot OOB request on error */ if (ret) a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); return ret; }
/*
 * Power up the GMU: enable clocks, bus quota, and interrupts; start the
 * firmware (cold or warm boot) and HFI; vote for the highest GPU
 * frequency; and take a GX power-domain reference for bookkeeping.
 * On failure the GMU is shut back down so it is left in a good state.
 * Returns 0 on success or a negative error code.
 */
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		/* Balance the pm_runtime_get_sync() above before bailing */
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get(gmu->gxpd);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}