/*
 * IPI handler that flushes caches on the CPU receiving the cross-call.
 *
 * The intended per-VMA user-range flush (driven by the struct tlb_args
 * payload in @arg) is compiled out with #if 0; instead the whole kernel
 * cache of the receiving CPU is flushed.  NOTE(review): the #else comment
 * marks this as a workaround ("to verify that JAVA is working") -- confirm
 * whether the ranged flush can be re-enabled, since a full flush on every
 * IPI is far more expensive.
 */
static inline void ipi_flush_cache_user_range(void *arg)
{
#if 0
	struct tlb_args *ta = (struct tlb_args *)arg;
	printk("function %s  line %d\n", __func__,__LINE__);
	local_flush_cache_user_range((struct vm_area_struct *)ta->ta_vma, ta->ta_start, ta->ta_end);
#else /* To verify that JAVA is working */
	__cpuc_flush_kern_all();
#endif
}
/*
 * pwrctrl_deep_sleep() - enter ACPU deep sleep (power-down) and resume.
 *
 * Returns RET_OK after a sleep/wake cycle (or a skipped sleep), RET_ERR
 * when the deep-sleep feature switch (PWC_SWITCH_ASLEEP) is disabled.
 *
 * Sequence: take the cross-core IPC spinlock, log sleep entry, mask local
 * interrupts, then (conditionally) disable L2, flush caches, save the
 * power-down-domain IP registers, execute the assembly deep-sleep entry,
 * and undo everything in reverse order on wakeup.
 */
s32_t  pwrctrl_deep_sleep( void_t )
{
    u32_t key = 0;

    /* Backup address for IP registers in the SOC power-down domain */
    u32_t * pulSocRegBakAddr = (u32_t *)g_aAcpuStoreReg;

    if (RET_OK != pwrctrl_is_func_on(PWC_SWITCH_ASLEEP))
    {
        return RET_ERR;
    }

    BSP_IPC_SpinLock(IPC_SEM_SMP_CPU0);
    /* Exception-log bookkeeping: count and timestamp the sleep attempt. */
    g_stAcpuPwcExcLog->core0.SlpCnt ++;
    g_stAcpuPwcExcLog->core0.SlpMgrSTm = pwrctrl_get_slice_time();

    local_irq_save(key);

    /*
     * Read the interrupt pending register; a pending interrupt should
     * abort the sleep attempt.  NOTE(review): the PWRCTRL_TRUE branch
     * below is the one that actually performs the sleep sequence, which
     * reads as the opposite of this (translated) original comment --
     * presumably pwrctrl_check_irq_pending_status() returns PWRCTRL_TRUE
     * when it is safe to sleep (nothing pending); confirm against its
     * implementation.
     */
    if (PWRCTRL_TRUE == pwrctrl_check_irq_pending_status())
    {

        pwrctrl_l2c_disable();
#ifdef CHIP_BB_HI6210

#else
        /* Flush L1 so DRAM holds a coherent image across power-down. */
        __cpuc_flush_kern_all();
#endif
        /*__cpuc_flush_user_all();*/
        /*__cpuc_flush_icache_all();*/

        pwrctrl_store_ip_reg(pulSocRegBakAddr);

        /* PD ENTRY: does not return until the core is woken back up. */
        pwrctrl_asm_deep_sleep_entry();

        pwrctrl_restore_ip_reg(pulSocRegBakAddr);
        pulSocRegBakAddr = NULL;

        pwrctrl_l2c_enable();

    }
    else
    {
        /*just for pclint*/
    }

    /* unlock interrupt */
    local_irq_restore(key);

    /* Log wakeup count/time, then release the cross-core lock. */
    g_stAcpuPwcExcLog->core0.WkCnt ++;
    g_stAcpuPwcExcLog->core0.WkMgrSTm = pwrctrl_get_slice_time();
    BSP_IPC_SpinUnLock(IPC_SEM_SMP_CPU0);
    return RET_OK;
}
Example #3
0
/*
 * boot_secondary() - release one secondary CPU from the holding pen.
 *
 * Publishes a magic value and the physical address of
 * vexpress_secondary_startup in system-wide registers, writes @cpu into
 * pen_release (flushed/cleaned so the secondary sees it before its caches
 * are on), then kicks the secondary with a soft interrupt and waits up to
 * 1 second for it to acknowledge by writing -1 back into pen_release.
 *
 * Returns 0 on success, -ENOSYS if the secondary never acknowledged.
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	//writew((BSYM(virt_to_phys(vexpress_secondary_startup))>>16), SECOND_START_ADDR_HI);
	//writew(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR_LO);
	writel(0xbabe, SECOND_MAGIC_NUMBER_ADRESS);
	writel(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR);
	__cpuc_flush_kern_all();
	pen_release = cpu;

	#if 0  //debug
	unsigned int *ptr=&pen_release;
	printk("pen_release = 0x%08x, addr= 0x%08x, pen_release ptr = 0x%08x\n ",pen_release,&pen_release,*ptr);
	#endif
	
	/*
	 * Push pen_release out of L1 and L2 so the secondary (caches off)
	 * reads the new value from memory.
	 */
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	smp_cross_call(cpumask_of(cpu));

	/* Poll for the secondary's acknowledgement (pen_release == -1). */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
	return pen_release != -1 ? -ENOSYS : 0;
}
Example #4
0
/*
 * smp_prepare_cpus() - prepare the platform for bringing up secondaries.
 *
 * Clamps @max_cpus to the number of possible cores, marks those CPUs
 * present, and -- when more than one CPU will run -- sets up the boot
 * CPU's local timer, enables the SCU, and publishes the secondary startup
 * address (plus a magic value) in the system-wide flag registers,
 * flushing caches so the (cache-off) secondaries can read it.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
    unsigned int ncores = num_possible_cpus();
    unsigned int cpu = smp_processor_id();
    int i;

    smp_store_cpu_info(cpu);

    /*
     * are we trying to boot more cores than exist?
     */
    if (max_cpus > ncores)
        max_cpus = ncores;

    /*
     * Initialise the present map, which describes the set of CPUs
     * actually populated at the present time.
     */
    for (i = 0; i < max_cpus; i++)
        set_cpu_present(i, true);

    /*
     * Initialise the SCU if there are more than one CPU and let
     * them know where to start.
     */
    if (max_cpus > 1) {
        /*
         * Enable the local timer or broadcast device for the
         * boot CPU, but only if we have more than one CPU.
         */
        percpu_timer_setup();

        scu_enable((void __iomem *)(PERI_ADDRESS(0x16000000))); // SCU PA = 0x16000000

        /*
         * Write the address of secondary startup into the
         * system-wide flags register. The boot monitor waits
         * until it receives a soft interrupt, and then the
         * secondary CPU branches to this address.
         */

        //printk("_Secondary_startup physical address = 0x%08x\n",BSYM(virt_to_phys(vexpress_secondary_startup)));
        //writew((BSYM(virt_to_phys(vexpress_secondary_startup))>>16), SECOND_START_ADDR_HI);
        writel(0xbabe, SECOND_MAGIC_NUMBER_ADRESS);
        writel(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR);
        /* Flush so the startup address reaches memory before any kick. */
        __cpuc_flush_kern_all();

    }
}
Example #5
0
/*
 * arm_pm_restart hook: run the reset sequence with caches still enabled.
 * At least the S3C2440 can fail to reset if there is bus activity
 * interrupted by the reset.
 */
static void s3c24xx_pm_restart(char mode, const char *cmd)
{
	unsigned long flags;

	if (mode == 's')
		goto fallback;

	local_irq_save(flags);

	/* Make memory coherent before poking the reset logic. */
	__cpuc_flush_kern_all();
	__cpuc_flush_user_all();

	arch_reset(mode, cmd);
	local_irq_restore(flags);

fallback:
	/* fallback, or unhandled */
	arm_machine_restart(mode, cmd);
}
Example #6
0
/*
 * arm_pm_restart hook: execute the reset code with caches enabled.
 * At least the S3C2440 has a problem resetting if there is bus
 * activity interrupted by the reset.
 */
static void s5pc11x_pm_restart(char mode)
{
	unsigned long flags;

	printk("Restarting system %s\n","S5PC11X");

	if (mode == 's')
		goto fallback;

	local_irq_save(flags);

	/* Flush everything out to memory before resetting. */
	__cpuc_flush_kern_all();
	__cpuc_flush_user_all();

	arch_reset(mode);
	local_irq_restore(flags);

fallback:
	/* fallback, or unhandled */
	arm_machine_restart(mode);
}
Example #7
0
/*
 * g2d_ioctl() - G2D ioctl handler for IRQ waiting and cache maintenance.
 *
 * G2D_WAIT_FOR_IRQ: sleeps (up to 10000 jiffies) until the interrupt
 * handler sets g2d->in_use, then clears the flag and returns 0.
 * G2D_DMA_CACHE_{INVAL,CLEAN,FLUSH}: performs the requested maintenance
 * over the caller-supplied physical range; G2D_DMA_CACHE_FLUSH_ALL
 * flushes the whole kernel cache.  Unknown commands silently return 0.
 *
 * SECURITY NOTE(review): dma_info.addr and dma_info.size are copied
 * straight from userspace and handed to phys_to_virt() / dmac_* with no
 * range validation, so a caller can direct cache operations at arbitrary
 * physical memory -- confirm callers are trusted or add bounds checks.
 */
static int g2d_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct g2d_dma_info dma_info;
	void *vaddr;

	if (cmd == G2D_WAIT_FOR_IRQ) {
		wait_event_timeout(g2d->wq,
				(atomic_read(&g2d->in_use) == 1), 10000);
		atomic_set(&g2d->in_use, 0);
		return 0;
	}

	if (copy_from_user(&dma_info, (struct g2d_dma_info *)arg,
				sizeof(dma_info)))
		return -EFAULT;

	/* Translate the (unvalidated) physical address to a kernel VA. */
	vaddr = phys_to_virt(dma_info.addr);

	switch (cmd) {
	case G2D_DMA_CACHE_INVAL:
		dmac_inv_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_CLEAN:
		dmac_clean_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_FLUSH:
		dmac_flush_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_FLUSH_ALL:
		__cpuc_flush_kern_all();
		break;

	default:
		break;
	}

	return 0;
}
Example #8
0
/*
*********************************************************************************************************
*                           aw_early_suspend
*
*Description: prepare necessary info for suspend&resume: backs up device,
*             voltage/frequency, clock and MMU state into reserved DRAM,
*             copies the standby binary into SRAM, and jumps to it.
*
*Return     : 0 is never returned on the normal path;
*
*Notes      : -1: preparation failed (data is still ok);
*           -2: reached only if the SRAM standby code returns
*               (data has been destroyed).
*********************************************************************************************************
*/
static int aw_early_suspend(void)
{
#define MAX_RETRY_TIMES (5)

    __s32 retry = MAX_RETRY_TIMES;
    
    //backup device state
    mem_ccu_save((__ccmu_reg_list_t *)(SW_VA_CCM_IO_BASE));
    mem_gpio_save(&(saved_gpio_state));
    mem_tmr_save(&(saved_tmr_state));
    mem_twi_save(&(saved_twi_state));
    mem_sram_save(&(saved_sram_state));    

    if (likely(mem_para_info.axp_enable))
    {
        //backup volt and freq state, after backup device state
        mem_twi_init(AXP_IICBUS);
        /* backup voltages; mem_get_voltage() returns -1 on failure, so
         * retry up to MAX_RETRY_TIMES before giving up. */
        while(-1 == (mem_para_info.suspend_dcdc2 = mem_get_voltage(POWER_VOL_DCDC2)) && --retry){
            ;
        }
        if(0 == retry){
            print_call_info();
            return -1;
        }else{
            retry = MAX_RETRY_TIMES;
        }   
        
        while(-1 == (mem_para_info.suspend_dcdc3 = mem_get_voltage(POWER_VOL_DCDC3)) && --retry){
            ;
        }
        if(0 == retry){
            print_call_info();
            return -1;
        }else{
            retry = MAX_RETRY_TIMES;
        }   
    }
    else
    {
        /* PMU disabled: mark both rails as "not saved". */
        mem_para_info.suspend_dcdc2 = -1;
        mem_para_info.suspend_dcdc3 = -1;
    }
    printk("dcdc2:%d, dcdc3:%d\n", mem_para_info.suspend_dcdc2, mem_para_info.suspend_dcdc3);

    /*backup bus ratio*/
    mem_clk_getdiv(&mem_para_info.clk_div);
    /*backup pll ratio*/
    mem_clk_get_pll_factor(&mem_para_info.pll_factor);
    
    //backup mmu
    save_mmu_state(&(mem_para_info.saved_mmu_state));
    //backup cpu state
//    __save_processor_state(&(mem_para_info.saved_cpu_context));
    //backup 0x0000,0000 page entry, size?
//    save_mapping(MEM_SW_VA_SRAM_BASE);
//    mem_para_info.saved_cpu_context.ttb_1r = DRAM_STANDBY_PGD_PA;
    /* Copy the live first-level page-table area into the standby PGD and
     * make it coherent.  NOTE(review): 0xc0007000 / 0x1000 are assumed to
     * cover the kernel's swapper PGD slice -- confirm for this platform. */
    memcpy((void*)(DRAM_STANDBY_PGD_ADDR + 0x3000), (void*)0xc0007000, 0x1000);
	__cpuc_coherent_kern_range(DRAM_STANDBY_PGD_ADDR + 0x3000, DRAM_STANDBY_PGD_ADDR + 0x4000 - 1);
    mem_para_info.saved_mmu_state.ttb_1r = DRAM_STANDBY_PGD_PA;

    //prepare resume0 code for resume

    if((DRAM_BACKUP_SIZE1) < sizeof(mem_para_info)){
        //judge the reserved space for mem para is enough or not.
        print_call_info();
        return -1;
    }

    //clean all the data into dram
    memcpy((void *)DRAM_BACKUP_BASE_ADDR1, (void *)&mem_para_info, sizeof(mem_para_info));
    dmac_flush_range((void *)DRAM_BACKUP_BASE_ADDR1, (void *)(DRAM_BACKUP_BASE_ADDR1 + DRAM_BACKUP_SIZE1 - 1));

    //prepare dram training area data
    memcpy((void *)DRAM_BACKUP_BASE_ADDR2, (void *)DRAM_BASE_ADDR, DRAM_TRANING_SIZE);
    dmac_flush_range((void *)DRAM_BACKUP_BASE_ADDR2, (void *)(DRAM_BACKUP_BASE_ADDR2 + DRAM_BACKUP_SIZE2 - 1));
    
    mem_arch_suspend();
    save_processor_state(); 
    
    //before creating mapping, build the coherent between cache and memory
    __cpuc_flush_kern_all();
    __cpuc_coherent_kern_range(0xc0000000, 0xffffffff-1);
    //create 0x0000,0000 mapping table: 0x0000,0000 -> 0x0000,0000 
    //create_mapping();

    //clean and flush
    mem_flush_tlb();
    
#ifdef PRE_DISABLE_MMU
    //jump to sram: dram enter selfresh, and power off.
    mem = (super_standby_func)SRAM_FUNC_START_PA;
#else
    //jump to sram: dram enter selfresh, and power off.
    mem = (super_standby_func)SRAM_FUNC_START;
#endif
    //move standby code to sram
    memcpy((void *)SRAM_FUNC_START, (void *)&suspend_bin_start, (int)&suspend_bin_end - (int)&suspend_bin_start);

#ifdef CONFIG_AW_FPGA_PLATFORM
*(unsigned int *)(0xf0007000 - 0x4) = 0x12345678;
printk("%s,%d:%d\n",__FILE__,__LINE__, *(unsigned int *)(0xf0007000 - 0x4));
#endif 
#ifdef PRE_DISABLE_MMU
    //enable the mapping and jump
    //invalidate tlb? maybe, but now, at this situation,  0x0000 <--> 0x0000 mapping never stay in tlb before this.
    //busy_waiting();
    jump_to_suspend(mem_para_info.saved_mmu_state.ttb_1r, mem);
#else
    mem();
#endif

    /* Only reached if the SRAM standby code returns: state is gone. */
    return -2;

}
/*
 * Flush the entire L1 cache of the calling CPU (clean + invalidate),
 * logging the operation at debug level 4 first.
 */
static void level1_cache_flush_all(void)
{
	DBG_MSG(4, ("meson_drm_ump_osk_msync(): Flushing the whole L1 cache\n"));
	__cpuc_flush_kern_all();
}
/*
 * ddr_reconfig() - change the DDR controller arbitration/QoS mode.
 *
 * @mode is masked to its low 4 bits.  Fast path: when the controller's
 * MMGCR register is 0 and mode < 2, only the port QoS (PQCR) registers
 * and the GRF_MEM_CON arbitration fields are rewritten; returns 1.
 * Slow path: with interrupts off, quiesce every DDR bus master this code
 * knows about (GPU command FIFO, VIP, IPP, SDMA0/DMA0/DMA1, VPU), then
 * run the actual reconfiguration from SRAM via __ddr_reconfig() on a
 * private stack, and finally restore GPU clock/power state.
 *
 * Returns 1 on success, 0 (ret's initial value) when a bus master could
 * not be quiesced in time and a cancel path was taken.
 *
 * NOTE(review): gpu_suspended/gpu_power/gpu_clock/clksel17/currcmdbufadr,
 * mem[], and the sram_printch/tmodelay1us/delayus helpers are file-scope
 * symbols defined elsewhere.  Locals baklcdctrl, count and the outer i
 * are unused outside disabled #if 0 regions.  The sram_printch('N')
 * calls are progress markers for post-mortem debugging.
 */
int  ddr_reconfig(int mode)
{
    int baklcdctrl;
    int count =0;
    int i;
    unsigned int ret =0;
    unsigned int con3save, flags;
    unsigned int tmo =0;
    mode &=0xf;
    /* Fast path: arbitration master gating off and a simple mode. */
    if((pDDR_Reg->MMGCR ==0) &&(mode <2))
    {
        pDDR_Reg->PQCR[0] =(mode ==0) ?0x0e000000 : 0x0e00f000;
        pDDR_Reg->PQCR[1] =(mode ==0) ?0x0e000000 : 0x0e03f000;
        pDDR_Reg->PQCR[2] =(mode ==0) ?0x0e000000 : 0x0e00f000;
        pGRF_Reg->GRF_MEM_CON = (pGRF_Reg->GRF_MEM_CON & ~0x3FF)
                    | ((mode ==0) ?((2<<0)|(1<<2)|(0<<4)|(1<<6)|(2<<8)):((0<<0)|(2<<2)|(1<<4)|(2<<6)|(2<<8)));
        return 1;
    }   
    local_irq_save(flags);
    sram_printch('1');
/*    if(mode ==2)
    {
        tmp =*(unsigned long volatile *)(0xf50080bc);
        pDDR_Reg->PQCR[0] =0x0e03f000;
        pDDR_Reg->PQCR[1] =0x0e01f000;
        pDDR_Reg->PQCR[2] =0x0e00f000;
        pDDR_Reg->MMGCR =(mode ==0) ?0 : 2;
    }
*/
//    asm volatile ("cpsid	if");
    {
        /* Flush caches so DRAM holds a coherent image while it is
         * inaccessible during reconfiguration. */
        __cpuc_flush_kern_all();
        __cpuc_flush_user_all();
        dsb();
        //some risk: if a common to lcdc is going, then a read form 0xf410c0000 may retrun an old val
        con3save =pSCU_Reg->CRU_CLKGATE_CON[3];
        pSCU_Reg->CRU_CLKGATE_CON[3] =con3save |(1<<3);
        pGRF_Reg->GRF_SOC_CON[0] |=(1<<0);
    {
        /* --- GPU quiesce: detect power/clock state, then wait for the
         * command FIFO to drain (or suspend it) before touching DDR. --- */
        gpu_suspended =0;
        gpu_power =0;
        gpu_clock =0;
        
        if((*(unsigned long volatile *)(RK29_PMU_BASE +0x10) &0x40) ==0)
        {
            gpu_power =1;
            if((0xf<<14) !=(pSCU_Reg->CRU_CLKGATE_CON[3] &(0xf<<14)))
            {
                gpu_clock =1;
                if(*(unsigned long volatile *)(RK29_GPU_BASE +0x4) !=0x7fffffff)
                {   //clock enable and not at idle
                    gpu_suspended =1;
#if 1
                    #if 1
                    /* Require 32 consecutive near-idle reads; restart the
                     * count on any busy sample, bail out after ~10us. */
                    int chktime =0;
                    for(chktime =0; chktime<32; chktime++ )
                    {
                        if(*(unsigned long volatile *)(RK29_GPU_BASE +0x4) !=0x7ffffffe)
                        {    chktime =0;
                              //
                        if((tmo =tmodelay1us(tmo)) >10)
                        #if 0 //RECONFIG_DEBUG
                            while(1);
                        #else
                            goto ddr_reconfig_cancel;
                        #endif
                        }
                    }
                    #if RECONFIG_DEBUG
                    if(tmo >maxtimeout)
                    {
                        maxtimeout =tmo;
                        printk("maxtimout %d\n", maxtimeout);
                    }
                    #endif
                    /* Sample the GPU current-command-buffer address until
                     * six consecutive reads agree within 0x10. */
                    {
                        unsigned int i,tmp;
                        currcmdbufadr =*(unsigned long volatile *)(RK29_GPU_BASE +0x664);
                        if((currcmdbufadr&0xfff0) ==0)
                            for(i =0; i<6; i++)
                            {
                                tmp =*(unsigned long volatile *)(RK29_GPU_BASE +0x664);
                                if(((tmp >currcmdbufadr) &&((tmp -currcmdbufadr) >0x10))
                                    ||((tmp <currcmdbufadr) &&((currcmdbufadr -tmp) >0x10)))
                                {
                                    printk("gpu:cmdbuffer base reg read error 0x%x !=0x%x\n", tmp, currcmdbufadr);
                                    i =0;
                                }
                                else
                                    delayus(1);
                                currcmdbufadr =tmp;
                            }
                    }
                    #if 0
                    for(i =0; i<0x1000; i++)
                    {
                        unsigned int tmp;
                        if(currcmdbufadr >(tmp =*(unsigned long volatile *)(0xf4120664)))
                            currcmdbufadr =tmp;
                    }
                    #else
                    /* Walk back (up to two 8-byte steps) to the expected
                     * WAIT opcode 0x380000c8 in the command stream. */
                    if(*(int *)(currcmdbufadr +0x60000000) !=0x380000c8) //0x60000000 assume VA =PA +0x60000000
                    {
                        currcmdbufadr -=8;
                        if(*(int *)(currcmdbufadr +0x60000000) !=0x380000c8)
                        {
                            currcmdbufadr -=8;
                            if(*(int *)(currcmdbufadr +0x60000000) !=0x380000c8)
                            #if RECONFIG_DEBUG
                                while(1);
                            #else
                                goto ddr_reconfig_cancel;
                            #endif
                        }
                    }
                    #endif
                    #if 0 //RECONFIG_DEBUG
                    if((currcmdbufadr &0xffffe000) !=0x736ce000)
                        while(1);
                    {
                        int i;
                        for(i =0; i<16; i++)
                            mem[i] =*(int *)(currcmdbufadr +0x60000000 +(i-4)*4);
                    }
            
                    #endif
                    #endif
                    
                    /* Stop the GPU front end and wait until fully idle. */
                    *(unsigned long volatile *)(RK29_GPU_BASE +0x658) =0x2;
                    dsb();
                    while(*(unsigned long volatile *)(RK29_GPU_BASE +0x4) !=0x7fffffff) delayus(1);      //
#else
                    gpuctl =*(unsigned long volatile *)(RK29_GPU_BASE +0x0);
                    *(unsigned long volatile *)(RK29_GPU_BASE +0x0) =gpususpendcmd;
                    delayus(100);
#endif
                }
            }
        }
    sram_printch('5');
        /* NOTE(review): bitwise & used as a logical AND here -- with 0/1
         * flag values the result is equivalent.  If the GPU was not fully
         * powered+clocked, force its power/clock on for the reconfig. */
        if(!(gpu_clock &gpu_power))
        {
            unsigned int tmoadd1ms =tmo +3000;
                sram_printch('c');
//            if(tmo==0)
                /* NOTE(review): deliberate hang on a debug flag in
                 * GRF_OS_REG[3] -- confirm this is intentional. */
                if(pGRF_Reg->GRF_OS_REG[3] ==0xff)
                    while(1);
            pSCU_Reg->CRU_CLKGATE_CON[3] =(con3save |(1<<3)) &0xfffc3fff;
            clksel17 =pSCU_Reg->CRU_CLKSEL_CON[17];
            pSCU_Reg->CRU_CLKSEL_CON[17]&=~(3<<14);
            dsb();
		    *(unsigned long volatile *)(RK29_PMU_BASE +0x10) &=~0x40;
            dsb();
            /* Give the GPU power domain ~1ms to stabilise. */
            while((tmo =tmodelay1us(tmo)) <tmoadd1ms);
            pSCU_Reg->CRU_CLKGATE_CON[3] =(con3save |(1<<3)) &0xfffc3fff;
        }
    }        
    sram_printch('6');
    //status check
        //3 VIP clock con2[22,18](0x20000064) VIPCTL[0](0x10108010) 0==stop 
        while(((0)==(pSCU_Reg->CRU_CLKGATE_CON[2] &((0x1<<18)|(0x1<<22)))) 
            &&((0)!=(*(unsigned long volatile *)(RK29_VIP_BASE +0x10) &(1<<0))) &&((1)!=(*(unsigned long volatile *)(RK29_VIP_BASE +0x2c) &(1<<0))))
            if((tmo =tmodelay1us(tmo)) >20)
            #if RECONFIG_DEBUG
               // goto ddr_reconfig_cancel2;
                while(1);
            #else
                goto ddr_reconfig_cancel2;
            #endif

    sram_printch('7');
        //1 IPP clock_con3[5:4](0x20000068)  INT_ST[6](0x10110010) 1 ==working
        if(((0)==(pSCU_Reg->CRU_CLKGATE_CON[3] &(0x3<<4))) &&
            ((0)!=(*(unsigned long volatile *)(RK29_IPP_BASE +0x10) &(1<<6))))
            if((tmo =tmodelay1us(tmo)) >200000)
            #if RECONFIG_DEBUG
                while(1);
            #else
                goto ddr_reconfig_cancel2;
            #endif
    sram_printch('8');
        //2 SDMA0 clock con0[10](0x2000005c) DSR[3:0](0x20180000) 0 ==stop
        
//        i2sxfer =*(unsigned long volatile *)(RK29_I2S0_BASE +0x28);
//        *(unsigned long volatile *)(RK29_I2S0_BASE +0x28) =0;
        while(((0)==(pSCU_Reg->CRU_CLKGATE_CON[0] &(0x1<<10))) 
            &&(((0)!=(*(unsigned long volatile *)(RK29_SDMAC0_BASE +0x0) &(0xf<<0))) 
                ||(((0)!=(*(unsigned long volatile *)(RK29_SDMAC0_BASE +0x100) &(0xf<<0)))/*&& ((0x27)!=(*(unsigned long volatile *)(RK29_SDMAC0_BASE +0x100) &(0xff<<0)))*/)
                    ||((0)!=(*(unsigned long volatile *)(RK29_SDMAC0_BASE +0x108) &(0xf<<0))) 
                        ||((0)!=(*(unsigned long volatile *)(RK29_SDMAC0_BASE +0x110) &(0xf<<0))) 
                            ||((0)!=(*(unsigned long volatile *)(RK29_SDMAC0_BASE +0x118) &(0xf<<0)))))
            if((tmo =tmodelay1us(tmo)) >200000)
            #if RECONFIG_DEBUG
                while(1);
            #else
                goto ddr_reconfig_cancel2;
            #endif
    sram_printch('9');
        //2 DMA0 clock con0[9](0x2000005c) DSR[3:0](0x201C0000) 0 ==stop
        while(((0)==(pSCU_Reg->CRU_CLKGATE_CON[0] &(0x1<<9))) 
            &&(((0)!=(*(unsigned long volatile *)(RK29_DMAC0_BASE +0x0) &(0xf<<0))) 
                ||((0)!=(*(unsigned long volatile *)(RK29_DMAC0_BASE +0x100) &(0xf<<0))) 
                    ||((0)!=(*(unsigned long volatile *)(RK29_DMAC0_BASE +0x108) &(0xf<<0))) 
                        ||((0)!=(*(unsigned long volatile *)(RK29_DMAC0_BASE +0x110) &(0xf<<0))) 
                            ||((0)!=(*(unsigned long volatile *)(RK29_DMAC0_BASE +0x118) &(0xf<<0)))))
            if((tmo =tmodelay1us(tmo)) >200000)
            #if RECONFIG_DEBUG
                while(1);
            #else
                goto ddr_reconfig_cancel2;
            #endif
    sram_printch('a');
        //2 DMA1 clock con1[5](0x20000060) DSR[3:0](0x20078000) 0 ==stop
        while(((0)==(pSCU_Reg->CRU_CLKGATE_CON[1] &(0x1<<5))) 
            &&(((0)!=(*(unsigned long volatile *)(RK29_DMAC1_BASE +0x0) &(0xf<<0))) 
                ||((0)!=(*(unsigned long volatile *)(RK29_DMAC1_BASE +0x100) &(0xf<<0))) 
                    ||((0)!=(*(unsigned long volatile *)(RK29_DMAC1_BASE +0x108) &(0xf<<0))) 
                        ||((0)!=(*(unsigned long volatile *)(RK29_DMAC1_BASE +0x110) &(0xf<<0))) 
                            ||((0)!=(*(unsigned long volatile *)(RK29_DMAC1_BASE +0x118) &(0xf<<0)))))
            if((tmo =tmodelay1us(tmo)) >200000)
            #if RECONFIG_DEBUG
                while(1);
            #else
                goto ddr_reconfig_cancel2;
            #endif
    sram_printch('b');
/*
        //4 USB
        if(((0)==(*(unsigned long volatile *)(0xf5000068) &(0x3<<4))) &&
            ((0)==(*(unsigned long volatile *)(0xf4110010) &(1<<6))))
            while(1);
*/
        //5 VPU when select VDPU clk VDPU clock con2[19,13:12]else con2[18,11:10] (0x20000068) wreg[1](0x10104204)  0==stop
        //wreg24[0] 0==stop
        {
            int clkgatemask;
            clkgatemask =((0x1<<18)|(0x3<<10))<<((((pGRF_Reg->GRF_SOC_CON[0]))>>23) &1);
            if((0)==(pSCU_Reg->CRU_CLKGATE_CON[3] &clkgatemask))
            while((((0)!=(*(unsigned long volatile *)(RK29_VCODEC_BASE +0x204) &(1<<0)))
                &&((0)==(*(unsigned long volatile *)(RK29_VCODEC_BASE +0x204) &(1<<13)))) //until idle or buff_int
                    ||((0)!=(*(unsigned long volatile *)(RK29_VCODEC_BASE +0x38) &(1<<0))))
                if((tmo =tmodelay1us(tmo)) >200000)
                #if RECONFIG_DEBUG
                    while(1);
                #else
                    goto ddr_reconfig_cancel2;
                #endif
        }
//        while(((0xf<<14)!=(pSCU_Reg->CRU_CLKGATE_CON[3] &(0xf<<14))) &&
//            (*(unsigned long volatile *)(0xf4120004) !=0x7fffffff));
    sram_printch('2');
    
        /* All masters quiet: run the real reconfiguration from SRAM on a
         * dedicated stack (DRAM is unusable while it runs). */
        {
	        static unsigned long save_sp;

	        DDR_SAVE_SP(save_sp);
            {
	            __ddr_reconfig(mode);

            }
	        DDR_RESTORE_SP(save_sp);
        } //    do_ddr_reconfig(mode);
///////////////////////////////////////////////////////////   
    sram_printch('3');
        ret =1;
//        *(unsigned long volatile *)(RK29_I2S0_BASE +0x28) =i2sxfer;
        /* Resume the GPU command stream exactly where it was stopped. */
        if(gpu_suspended)
        {
#if 1
            *(unsigned long volatile *)(RK29_GPU_BASE +0x654) =currcmdbufadr;
            *(unsigned long volatile *)(RK29_GPU_BASE +0x658) =0x10002;
            dsb();
            while(*(unsigned long volatile *)(RK29_GPU_BASE +0x4) !=0x7ffffffe);
            #if RECONFIG_DEBUG
            mem[34] =*(unsigned long volatile *)(RK29_GPU_BASE +0x660);
            mem[35] =*(unsigned long volatile *)(RK29_GPU_BASE +0x664);
            mem[36] =*(unsigned long volatile *)(RK29_GPU_BASE +0x668);
            mem[37] =*(unsigned long volatile *)(RK29_GPU_BASE +0x66c);
            {
                int i;
                for(i =0; i<16; i++)
                    mem[i+16] =*(int *)(currcmdbufadr +0x60000000 +(i-4)*4);
            }
            mem[32] =currcmdbufadr;
            mem[33]++;
//            printk("reconfig 0x%x  ,0x%x  ,0x%x  ,0x%x  ,", *(unsigned int volatile *)(0xf4120660),
//                *(unsigned int volatile *)(0xf4120664),*(unsigned int volatile *)(0xf4120668),
//                *(unsigned int volatile *)(0xf412066c));
            #endif
#else
            *(unsigned long volatile *)(RK29_GPU_BASE +0x0) =gpuctl;
#endif
        }
    
        #if RECONFIG_DEBUG
        printk("clkgate =0x%x, 0x%x\n",pSCU_Reg->CRU_CLKGATE_CON[3],tmo);
        #endif
        count++;

/* Cancel path 2: restore GPU clock/power we may have forced on. */
ddr_reconfig_cancel2:
        if(!gpu_clock )
            pSCU_Reg->CRU_CLKSEL_CON[17] =clksel17;
        if(!gpu_power)
            *(unsigned long volatile *)(RK29_PMU_BASE +0x10) |=0x40;
        dsb();
        #if RECONFIG_DEBUG
        if((gpu_power ==0) &&( 1 ==gpu_clock))
            while(1);
        #endif
/* Cancel path 1: restore clock gating and GRF arbitration bit. */
ddr_reconfig_cancel:
        pSCU_Reg->CRU_CLKGATE_CON[3] =con3save;
        pGRF_Reg->GRF_SOC_CON[0]&=~(1<<0);
    }
    local_irq_restore(flags);
    sram_printch('4');
    return ret;
}
/*
 * Flush the calling CPU's entire L1 (kernel) cache.  The L2 flush was
 * left commented out by the original author -- NOTE(review): confirm
 * whether outer-cache maintenance is needed at this call site.
 */
static void flush_cpu_cache(void)
{
	__cpuc_flush_kern_all();
	//__cpuc_flush_l2cache_all();
}