Example #1
static void kgsl_ringbuffer_submit(struct kgsl_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	GSL_RB_UPDATE_WPTR_POLLING(rb);
	/* Drain write buffer and data memory barrier */
	dsb();
	wmb();

	/* Memory fence to ensure all data has posted.  On some systems,
	 * like 7x27, the register block is not allocated as strongly ordered
	 * memory.  Adding a memory fence ensures ordering during ringbuffer
	 * submits.
	 */
	mb();
	outer_sync();

	kgsl_yamato_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);

	rb->flags |= KGSL_FLAGS_ACTIVE;
}
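
The submit path above drains the CPU write buffer (dsb()/wmb()), issues a full barrier plus outer_sync() to push any writes still sitting in the outer (L2) cache write buffer, and only then writes the ring-buffer write pointer register, so the GPU never fetches commands that have not yet reached memory. A minimal sketch of the same "barriers, then MMIO doorbell" ordering, with a hypothetical ring_doorbell() helper that is not part of the driver above:

#include <linux/types.h>
#include <linux/io.h>
#include <asm/outercache.h>

/* Hypothetical helper, for illustration only: make all previously queued
 * commands (written to a ring in normal, cacheable memory) visible before
 * telling the device about the new write pointer.
 */
static void ring_doorbell(void __iomem *wptr_reg, u32 wptr)
{
	wmb();			/* drain the CPU write buffer first */
	outer_sync();		/* then drain the outer (L2) write buffer */
	writel(wptr, wptr_reg);	/* finally post the doorbell/wptr write */
}
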
/* Set the DDR to either 528MHz or 400MHz for MX6q
 * or 400MHz for MX6DL.
 */
int set_high_bus_freq(int high_bus_freq)
{
	if (bus_freq_scaling_initialized && bus_freq_scaling_is_active)
		cancel_delayed_work_sync(&low_bus_freq_handler);

	if (busfreq_suspended)
		return 0;

	if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
		return 0;

	if (cpu_is_mx6sl())
		high_bus_freq = 1;

	if (high_bus_freq_mode && high_bus_freq)
		return 0;

	/* medium bus freq is only supported for MX6DQ */
	if (cpu_is_mx6q() && med_bus_freq_mode && !high_bus_freq)
		return 0;

	if (cpu_is_mx6dl() && high_bus_freq)
		high_bus_freq = 0;

	if (cpu_is_mx6dl() && med_bus_freq_mode)
		return 0;

	if ((high_bus_freq_mode && (high_bus_freq || lp_high_freq)) ||
	    (med_bus_freq_mode && !high_bus_freq && lp_med_freq &&
	     !lp_high_freq))
		return 0;

	if (cpu_is_mx6sl()) {
		u32 reg;
		unsigned long flags;
		u32 ttbr1;

		spin_lock_irqsave(&freq_lock, flags);
		/* sync the outer cache. */
		outer_sync();

		ttbr1 = save_ttbr1();

		/* Change DDR freq in IRAM. */
		mx6sl_ddr_freq_change_iram(ddr_normal_rate, low_bus_freq_mode);

		restore_ttbr1(ttbr1);

		spin_unlock_irqrestore(&freq_lock, flags);

		/*
		 * Set periph_clk to be sourced from pll2_pfd2_400M.
		 * The divider must be set before changing the parent when
		 * the new parent clock is faster than the previous one.
		 */
		clk_set_rate(ahb_clk, clk_round_rate(ahb_clk,
						     LPAPM_CLK / 3));
		clk_set_rate(axi_clk,
			     clk_round_rate(axi_clk, LPAPM_CLK / 2));
		clk_set_parent(periph_clk, pll2_400);

		if (low_bus_freq_mode) {
			/* Now move ARM to be sourced from PLL2_400 too. */
			clk_set_parent(pll1_sw_clk, pll2_400);

			/* Ensure that the clock will be at original speed. */
			reg = __raw_writel(org_arm_podf, MXC_CCM_CACRR);
			while (__raw_readl(MXC_CCM_CDHIPR))
				;
			clk_disable(pll1);
		}
		high_bus_freq_mode = 1;
		low_bus_freq_mode = 0;
		audio_bus_freq_mode = 0;
	} else {
		clk_enable(pll3);
		if (high_bus_freq) {
			update_ddr_freq(ddr_normal_rate);
			/* Make sure periph clk's parent also got updated */
			clk_set_parent(periph_clk, pll2);
			if (med_bus_freq_mode)
				clk_disable(pll2_400);
			high_bus_freq_mode = 1;
			med_bus_freq_mode = 0;
		} else {
			clk_enable(pll2_400);
			update_ddr_freq(ddr_med_rate);
			/* Make sure periph clk's parent also got updated */
			clk_set_parent(periph_clk, pll2_400);
			high_bus_freq_mode = 0;
			med_bus_freq_mode = 1;
		}
		if (audio_bus_freq_mode)
			clk_disable(pll2_400);

		/* AXI_CLK is sourced from PLL3_PFD_540 on MX6DL */
		if (cpu_is_mx6dl() &&
			clk_get_parent(axi_clk) != pll3_540)
			clk_set_parent(axi_clk, pll3_540);

		low_bus_freq_mode = 0;
		audio_bus_freq_mode = 0;

		clk_disable(pll3);
	}
	return 0;
}
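
set_high_bus_freq() takes a single flag: non-zero requests the full DDR setpoint, zero lets MX6DQ fall back to the medium (400MHz) setpoint. A hedged usage sketch, with a hypothetical caller name, would raise the bus before a bandwidth-heavy operation and rely on the normal idle path (the low_bus_freq_handler delayed work that set_high_bus_freq() cancels) to drop it again later:

#include <linux/printk.h>

/* Hypothetical caller, not part of the driver above. */
static void prepare_for_heavy_traffic(void)
{
	/* 1 = ask for the high setpoint; the driver ignores the request
	 * while suspended or while bus frequency scaling is inactive.
	 */
	if (set_high_bus_freq(1))
		pr_warn("bus_freq: failed to raise DDR setpoint\n");
}
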
void reduce_bus_freq(void)
{
	if (!cpu_is_mx6sl()) {
		if (cpu_is_mx6dl() &&
			(clk_get_parent(axi_clk) != periph_clk))
			/*
			 * Set the axi_clk to be sourced from the periph_clk
			 * so that its frequency can be lowered to 50MHz or
			 * 24MHz as the case may be.
			 */
			clk_set_parent(axi_clk, periph_clk);

		clk_enable(pll3);
		if (lp_audio_freq) {
			/* Need to ensure that PLL2_PFD_400M is kept ON. */
			clk_enable(pll2_400);
			update_ddr_freq(DDR_AUDIO_CLK);
			/* Make sure periph clk's parent also got updated */
			clk_set_parent(periph_clk, pll2_200);
			audio_bus_freq_mode = 1;
			low_bus_freq_mode = 0;
		} else {
			update_ddr_freq(LPAPM_CLK);
			/* Make sure periph clk's parent also got updated */
			clk_set_parent(periph_clk, osc_clk);
			if (audio_bus_freq_mode)
				clk_disable(pll2_400);
			low_bus_freq_mode = 1;
			audio_bus_freq_mode = 0;
		}

		if (med_bus_freq_mode)
			clk_disable(pll2_400);

		clk_disable(pll3);
		med_bus_freq_mode = 0;
	} else {
		u32 reg;
		u32  div;
		unsigned long flags;

		if (high_bus_freq_mode) {
			/* Set periph_clk to be sourced from OSC_CLK */
			/* Set AXI to 24MHz. */
			clk_set_parent(periph_clk, osc_clk);
			clk_set_rate(axi_clk,
				clk_round_rate(axi_clk, LPAPM_CLK));
			/* Set AHB to 24MHz. */
			clk_set_rate(ahb_clk,
				clk_round_rate(ahb_clk, LPAPM_CLK));
		}
		if (lp_audio_freq) {
			u32 ttbr1;
			/* PLL2 is on in this mode, as DDR is at 100MHz. */
			/* Now change DDR freq while running from IRAM. */

			/* Set AHB to 24MHz. */
			clk_set_rate(ahb_clk,
				clk_round_rate(ahb_clk, LPAPM_CLK / 3));

			spin_lock_irqsave(&freq_lock, flags);
			/* sync the outer cache. */
			outer_sync();

			/* Save TTBR1 */
			ttbr1 = save_ttbr1();

			mx6sl_ddr_freq_change_iram(DDR_AUDIO_CLK,
							low_bus_freq_mode);
			restore_ttbr1(ttbr1);

			spin_unlock_irqrestore(&freq_lock, flags);

			if (low_bus_freq_mode) {
				/* Switch ARM to run off PLL2_PFD2_400MHz
				 * since DDR is anyway at 100MHz.
				 */
				clk_set_parent(pll1_sw_clk, pll2_400);

				/* Ensure that the clock will be
				 * at original speed.
				 */
				reg = __raw_writel(org_arm_podf, MXC_CCM_CACRR);
				while (__raw_readl(MXC_CCM_CDHIPR))
					;
				/* PLL1 was enabled in the code below (when
				 * ARM is sourced from PLL1), so disable it here.
				 */
				clk_disable(pll1);
			}
			low_bus_freq_mode = 0;
			audio_bus_freq_mode = 1;
		} else {
			u32 ttbr1;
			/* Set MMDC clk to 24MHz. */
			/* Since we are going to set PLL2 in bypass mode,
			 * move the CPU clock off PLL2.
			 */
			/* Ensure that the clock will be at
			 * lowest possible freq.
			 */
			org_arm_podf = __raw_readl(MXC_CCM_CACRR);
			/* Need to enable PLL1 before setting its rate. */
			clk_enable(pll1);
			clk_set_rate(pll1,
				cpu_op_tbl[cpu_op_nr - 1].pll_lpm_rate);
			div = clk_get_rate(pll1) /
					cpu_op_tbl[cpu_op_nr - 1].cpu_rate;

			reg = __raw_writel(div - 1, MXC_CCM_CACRR);
			while (__raw_readl(MXC_CCM_CDHIPR))
				;
			clk_set_parent(pll1_sw_clk, pll1);

			spin_lock_irqsave(&freq_lock, flags);
			/* sync the outer cache. */
			outer_sync();

			ttbr1 = save_ttbr1();
			/* Now change DDR freq while running from IRAM. */
			mx6sl_ddr_freq_change_iram(LPAPM_CLK,
					low_bus_freq_mode);
			restore_ttbr1(ttbr1);

			spin_unlock_irqrestore(&freq_lock, flags);

			low_bus_freq_mode = 1;
			audio_bus_freq_mode = 0;
		}
	}
	high_bus_freq_mode = 0;
}
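
reduce_bus_freq() is the counterpart that drops to the audio (DDR_AUDIO_CLK) or LPAPM (24MHz) setpoint. Since set_high_bus_freq() above cancels a delayed work item named low_bus_freq_handler before raising the bus, the drop is evidently deferred rather than immediate. A sketch of such wiring, with the handler and request-function names assumed and the 3-second delay purely illustrative:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Assumed wiring: defer the drop so short idle gaps don't thrash the bus. */
static void reduce_bus_freq_handler(struct work_struct *work)
{
	reduce_bus_freq();
}
static DECLARE_DELAYED_WORK(low_bus_freq_handler, reduce_bus_freq_handler);

/* Hypothetical entry point: request the low setpoint ~3 s from now. */
static void request_low_bus_freq(void)
{
	schedule_delayed_work(&low_bus_freq_handler, msecs_to_jiffies(3000));
}
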
Example #4
static ssize_t outer_sync_store(struct device_driver *driver,
				const char *buf, size_t count)
{
	outer_sync();

	return count;
}
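
The store callback above only does something once it is bound to a sysfs driver attribute. A hedged sketch of that wiring using the legacy DRIVER_ATTR()/driver_create_file() pattern (the attribute name and init function below are illustrative, not from the driver itself):

#include <linux/device.h>
#include <linux/stat.h>

/* Write-only "outer_sync" attribute; any write triggers outer_sync(). */
static DRIVER_ATTR(outer_sync, S_IWUSR, NULL, outer_sync_store);

/* Hypothetical init hook: expose the attribute in the driver's sysfs dir. */
static int outer_sync_sysfs_init(struct device_driver *drv)
{
	return driver_create_file(drv, &driver_attr_outer_sync);
}
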
static uint32_t
kgsl_ringbuffer_addcmds(struct kgsl_ringbuffer *rb,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += (flags & KGSL_CMD_FLAGS_CONTEXT_CHANGE) ? 2 : 0;

	ringcmds = kgsl_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	if (flags & KGSL_CMD_FLAGS_CONTEXT_CHANGE) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_CTXT_IDENTIFIER);
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	/* memory barriers added for the timestamp update */
	mb();
	dsb();
	outer_sync();

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}