Example #1
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}
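All of these examples go through a small set of MMIO helpers rather than touching registers directly. The sketch below shows roughly what those helpers look like; it assumes gmu->mmio is the ioremapped GMU register block, that register offsets are in dword units (hence the << 2), and that msm_writel()/msm_readl() and readl_poll_timeout() from <linux/iopoll.h> are the underlying accessors. Treat it as an illustrative sketch, not the exact header contents.

/* Sketch of the gmu_* accessors used throughout these examples */
static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
	msm_writel(value, gmu->mmio + (offset << 2));
}

static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
{
	return msm_readl(gmu->mmio + (offset << 2));
}

/* Read-modify-write: clear 'mask', then set 'or' */
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
	u32 val = gmu_read(gmu, reg);

	val &= ~mask;

	gmu_write(gmu, reg, val | or);
}

/* Poll a GMU register until 'cond' is true or 'timeout' microseconds pass */
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)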
Example #2
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}
Example #3
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];

	/*
	 * Eventually we will want to scale the path vote with the frequency but
	 * for now leave it at max so that the performance is nominal.
	 */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
}
Example #4
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (!ret) {
		gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

		/* Re-enable the power counter */
		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
		return 0;
	}

	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
	return ret;
}
Example #5
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
Example #6
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
Example #7
static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}
Example #8
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
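Putting a few of these pieces together: a shutdown path would typically tell the GMU firmware to slumber, quiesce the interrupts, and only then let RPMh power the RSC down. The ordering below is a hypothetical sketch built from the functions shown in these examples; a6xx_hfi_stop() and the exact structure are assumptions, not copied from the driver.

/* Hypothetical shutdown ordering using the helpers shown in these examples */
static void example_gmu_shutdown(struct a6xx_gmu *gmu)
{
	/* Ask the GMU firmware to transition into slumber (Example #2) */
	a6xx_gmu_notify_slumber(gmu);

	/* Stop servicing HFI traffic (assumed helper) */
	a6xx_hfi_stop(gmu);

	/* Mask and disable the GMU/HFI interrupts (Example #6) */
	a6xx_gmu_irq_disable(gmu);

	/* Finally, tell RPMh to power off the GPU RSC (Example #8) */
	a6xx_rpmh_stop(gmu);
}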
Example #9
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
Example #10
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}
Example #11
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}
Example #12
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
				name,
				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}
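As Examples #2 and #3 illustrate, a6xx_gmu_set_oob() is normally paired with a matching a6xx_gmu_clear_oob() once the request has been serviced. A minimal, hypothetical caller showing the pattern (the GMU_OOB_GPU_SET transaction here is illustrative, not lifted from the driver):

/* Hypothetical OOB transaction: request, use, release */
static int example_oob_gpu_access(struct a6xx_gmu *gmu)
{
	int ret;

	/* Ask the GMU for the GPU_SET out-of-band slot */
	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET);
	if (ret)
		return ret;

	/* ... touch GPU registers while the GMU holds the vote ... */

	/* Release the slot so the GMU can power manage again */
	a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);

	return 0;
}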
Example #13
static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
}
Example #14
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
Example #15
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}
Example #16
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
Example #17
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
Example #18
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}
Example #19
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
		tasklet_schedule(&gmu->hfi_tasklet);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}
Example #20
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status &  A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}
Example #21
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (!pdcptr || !seqptr)
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}
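The RPMh init sequence above also uses pdc_write() against the separately mapped gmu_pdc and gmu_pdc_seq regions. A plausible sketch of that helper, under the same dword-offset assumption as gmu_write() above (the exact definition is an assumption, not copied from the header):

/* Sketch: write a PDC register through a raw __iomem mapping */
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}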
Example #22
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get(gmu->gxpd);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}
Example #23
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}