Example #1
void __init tegra_cpu_reset_handler_init(void)
{
#ifdef CONFIG_SMP
    __tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
        *((u32 *)cpu_present_mask);
    __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
        virt_to_phys((void *)tegra_secondary_startup);
#endif

#ifdef CONFIG_PM_SLEEP
    __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] =
        TEGRA_IRAM_CODE_AREA;
    __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] =
        virt_to_phys((void *)tegra_resume);
#endif

    /* Push all of reset handler data out to the L3 memory system. */
    __cpuc_coherent_kern_range(
        (unsigned long)&__tegra_cpu_reset_handler_data[0],
        (unsigned long)&__tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE]);

    outer_clean_range(__pa(&__tegra_cpu_reset_handler_data[0]),
                      __pa(&__tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE]));

    tegra_cpu_reset_handler_enable();
}
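The pattern above is worth calling out: data that a freshly reset CPU will read with its MMU and caches disabled must be pushed all the way out to RAM, through both the inner (CPU) cache and any outer cache such as a PL310. A minimal sketch of that pattern, assuming a hypothetical publish_to_ram() helper (not a kernel API):

#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/memory.h>

/* Hypothetical helper: make buf..buf+len visible to a CPU whose caches
 * are off.  Clean/invalidate the inner cache by virtual address, then
 * clean the outer cache by physical address. */
static void publish_to_ram(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	__cpuc_coherent_kern_range(start, start + len);
	outer_clean_range(__pa(buf), __pa(buf) + len);
}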
Example #2
/*
 * Create the page directory entry for the identity mapping
 * 0x00000000 -> 0x00000000.
 */
void create_mapping(void)
{
	/* install a 1MB section descriptor for VA 0x00000000 */
	*((volatile __u32 *)(PAGE_TBL_ADDR)) = 0xc4a;

	/*
	 * Clean the D-cache, invalidate the I-cache, and invalidate the TLB
	 * so that the correct physical address will be accessed.
	 *
	 * Cache: maintenance works at cache-line granularity, and the end
	 * address is exclusive (the line containing it is not flushed).
	 *
	 * TLB: invalidation works at PAGE_SIZE granularity, and the end
	 * address is likewise exclusive.
	 *
	 * Note: the new mapping is only used later, during resume, not
	 * immediately, and the caches are cleaned again at the end of the
	 * suspend path, so the clean & invalidate here is not strictly
	 * necessary.  It is kept for safety during testing (e.g. jumping
	 * straight to the resume code).
	 */

	// Note: 0xc0000000 is mapped as a device area and needs no cache flush.
	// Ref: arch/arm/kernel/head.S
	__cpuc_coherent_kern_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));
	local_flush_tlb_kernel_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));
	return;
}
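Both range operations in create_mapping() treat the end address as exclusive, as the comment above spells out. A hedged sketch of the same update-then-flush step as a helper (set_section_desc() is hypothetical, and it mirrors the example's choice of flushing the table's own address range):

#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Hypothetical helper: write one first-level section descriptor and make
 * it visible to the MMU's table walker. */
static void set_section_desc(volatile u32 *entry, u32 val)
{
	unsigned long start = (unsigned long)entry;

	*entry = val;
	/* clean D-cache / invalidate I-cache over the descriptor word;
	 * the end address is exclusive */
	__cpuc_coherent_kern_range(start, start + sizeof(u32));
	/* drop any stale translation; again end-exclusive */
	local_flush_tlb_kernel_range(start, start + sizeof(u32));
}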
Example #3
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;

		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
Example #4
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	/* VIPT non-aliasing cache */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
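Examples #3 and #4 are two kernel revisions of the same helper: when ptrace writes instructions into another process through the kernel mapping, the D-cache and I-cache must be made coherent for that range or the traced task may execute stale instructions. Roughly how the ARM kernel drives it from copy_to_user_page(), pairing with the Example #3 signature (a simplified sketch based on arch/arm/mm/flush.c):

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	/* write the new bytes through the kernel mapping ... */
	memcpy(dst, src, len);
	/* ... then make the I-cache coherent with them */
	flush_ptrace_access(vma, page, uaddr, dst, len);
}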
Example #5
/**
 * Restore the VA 0x00000000 mapping.
 * @vaddr: the virtual address of the MMU mapping to restore.
 */
void restore_mapping(unsigned long vaddr)
{
	unsigned long addr;
	
	addr = vaddr & PAGE_MASK;
	
	if (addr != backup_tbl[0].vaddr) {
		/* mismatch with the saved entry: hang so the fault is visible */
		while (1)
			;
	}

	*((volatile __u32 *)(PAGE_TBL_ADDR)) = backup_tbl[0].entry_val;
	// clean D-cache, invalidate I-cache
	__cpuc_coherent_kern_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));
	// flush the TLB after changing the MMU mapping
	local_flush_tlb_kernel_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));

	return;
}
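restore_mapping() assumes a save_mapping() counterpart filled backup_tbl[0] before create_mapping() overwrote the entry. The vendor source for it is not shown here; a hedged sketch of what it plausibly looks like (the struct layout is an assumption inferred from restore_mapping()):

/* assumed layout, inferred from how restore_mapping() reads backup_tbl */
struct mapping_backup {
	unsigned long vaddr;	/* page-aligned VA whose entry was saved */
	__u32 entry_val;	/* original first-level descriptor */
};
static struct mapping_backup backup_tbl[1];

void save_mapping(unsigned long vaddr)
{
	backup_tbl[0].vaddr = vaddr & PAGE_MASK;
	backup_tbl[0].entry_val = *((volatile __u32 *)(PAGE_TBL_ADDR));
}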
Example #6
/*
*********************************************************************************************************
*                           aw_early_suspend
*
* Description: prepare the necessary information for suspend & resume.
*
* Return     : -1: suspend aborted early, the backed-up data is still intact;
*              -2: the data has been destroyed (the suspend path was entered).
*********************************************************************************************************
*/
static int aw_early_suspend(void)
{
#define MAX_RETRY_TIMES (5)

    __s32 retry = MAX_RETRY_TIMES;
    
    //back up the device state
    mem_ccu_save((__ccmu_reg_list_t *)(SW_VA_CCM_IO_BASE));
    mem_gpio_save(&(saved_gpio_state));
    mem_tmr_save(&(saved_tmr_state));
    mem_twi_save(&(saved_twi_state));
    mem_sram_save(&(saved_sram_state));    

    if (likely(mem_para_info.axp_enable))
    {
        //back up the voltage and frequency state, after the device state
        mem_twi_init(AXP_IICBUS);
        /* backup voltages */
        while(-1 == (mem_para_info.suspend_dcdc2 = mem_get_voltage(POWER_VOL_DCDC2)) && --retry){
            ;
        }
        if(0 == retry){
            print_call_info();
            return -1;
        }else{
            retry = MAX_RETRY_TIMES;
        }   
        
        while(-1 == (mem_para_info.suspend_dcdc3 = mem_get_voltage(POWER_VOL_DCDC3)) && --retry){
            ;
        }
        if(0 == retry){
            print_call_info();
            return -1;
        }else{
            retry = MAX_RETRY_TIMES;
        }   
    }
    else
    {
        mem_para_info.suspend_dcdc2 = -1;
        mem_para_info.suspend_dcdc3 = -1;
    }
    printk("dcdc2:%d, dcdc3:%d\n", mem_para_info.suspend_dcdc2, mem_para_info.suspend_dcdc3);

    /*backup bus ratio*/
    mem_clk_getdiv(&mem_para_info.clk_div);
    /*backup pll ratio*/
    mem_clk_get_pll_factor(&mem_para_info.pll_factor);
    
    //backup mmu
    save_mmu_state(&(mem_para_info.saved_mmu_state));
    //backup cpu state
//    __save_processor_state(&(mem_para_info.saved_cpu_context));
    //back up the 0x00000000 page entry (size?)
//    save_mapping(MEM_SW_VA_SRAM_BASE);
//    mem_para_info.saved_cpu_context.ttb_1r = DRAM_STANDBY_PGD_PA;
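    /*
     * Copy the kernel-space part of the page directory (0xc0007000 is
     * likely swapper_pg_dir at 0xc0004000 plus 0x3000, i.e. the 4-byte
     * section entries covering VAs from 0xc0000000 upward) into the
     * standby page directory, then clean it out to RAM.
     */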
    memcpy((void*)(DRAM_STANDBY_PGD_ADDR + 0x3000), (void*)0xc0007000, 0x1000);
	__cpuc_coherent_kern_range(DRAM_STANDBY_PGD_ADDR + 0x3000, DRAM_STANDBY_PGD_ADDR + 0x4000 - 1);
    mem_para_info.saved_mmu_state.ttb_1r = DRAM_STANDBY_PGD_PA;

    //prepare resume0 code for resume

    if((DRAM_BACKUP_SIZE1) < sizeof(mem_para_info)){
        //check that the space reserved for mem_para_info is large enough
        print_call_info();
        return -1;
    }

    //copy the parameter data to the DRAM backup area and flush it
    memcpy((void *)DRAM_BACKUP_BASE_ADDR1, (void *)&mem_para_info, sizeof(mem_para_info));
    dmac_flush_range((void *)DRAM_BACKUP_BASE_ADDR1, (void *)(DRAM_BACKUP_BASE_ADDR1 + DRAM_BACKUP_SIZE1 - 1));

    //prepare the DRAM training area data
    memcpy((void *)DRAM_BACKUP_BASE_ADDR2, (void *)DRAM_BASE_ADDR, DRAM_TRANING_SIZE);
    dmac_flush_range((void *)DRAM_BACKUP_BASE_ADDR2, (void *)(DRAM_BACKUP_BASE_ADDR2 + DRAM_BACKUP_SIZE2 - 1));
    
    mem_arch_suspend();
    save_processor_state(); 
    
    //before creating the mapping, make the cache and memory coherent
    __cpuc_flush_kern_all();
    __cpuc_coherent_kern_range(0xc0000000, 0xffffffff-1);
    //create the identity mapping table: 0x00000000 -> 0x00000000
    //create_mapping();

    //clean and flush
    mem_flush_tlb();
    
#ifdef PRE_DISABLE_MMU
    //jump to SRAM: put the DRAM into self-refresh and power off
    mem = (super_standby_func)SRAM_FUNC_START_PA;
#else
    //jump to SRAM: put the DRAM into self-refresh and power off
    mem = (super_standby_func)SRAM_FUNC_START;
#endif
    //copy the standby code into SRAM
    memcpy((void *)SRAM_FUNC_START, (void *)&suspend_bin_start, (int)&suspend_bin_end - (int)&suspend_bin_start);

#ifdef CONFIG_AW_FPGA_PLATFORM
    *(unsigned int *)(0xf0007000 - 0x4) = 0x12345678;
    printk("%s,%d:%d\n", __FILE__, __LINE__, *(unsigned int *)(0xf0007000 - 0x4));
#endif
#ifdef PRE_DISABLE_MMU
    //enable the mapping and jump
    //invalidate the TLB?  Possibly, but at this point the 0x0 <-> 0x0
    //mapping has never been in the TLB, so it is not required.
    //busy_waiting();
    jump_to_suspend(mem_para_info.saved_mmu_state.ttb_1r, mem);
#else
    mem();
#endif

    return -2;

}
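The SRAM hand-off at the end of aw_early_suspend() is itself a recurring pattern: copy position-independent code into on-chip SRAM, make the copy visible past the caches, then jump to it. A condensed, hedged sketch (jump_to_sram() and sram_entry_t are assumptions, not vendor API):

#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>

typedef void (*sram_entry_t)(void);

static void jump_to_sram(void *sram_base, const void *code, size_t len)
{
	/* stage the standby code in SRAM */
	memcpy(sram_base, code, len);
	/* clean D-cache and invalidate I-cache over the copy so the CPU
	 * fetches the new instructions rather than stale cache contents */
	__cpuc_coherent_kern_range((unsigned long)sram_base,
				   (unsigned long)sram_base + len);
	/* enter the standby code; it does not return */
	((sram_entry_t)sram_base)();
}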