Пример #1
0
/**
 * This function is called when the OS makes a firmware call with the 
 * function code APPF_INTIALIZE.
 *
 * It is called using the APPF translation tables that were set up in
 * appf_runtime_init, above.
 */
static int late_init(void)
{
    struct appf_cluster *cluster;
    int cluster_index;
    unsigned maintable_pa;
		dbg_prints("late_init ...\n");
    cluster_index = appf_platform_get_cluster_index();
    
    maintable_pa = reloc_addr((unsigned)&main_table);
    cluster = ((struct appf_main_table*)maintable_pa)->cluster_table + cluster_index;
     /*
     * Clean the translation tables out of the L1 dcache
     * (see comments in disable_clean_inv_dcache_v7_l1)
     */
    dsb();
    
//    clean_dcache_v7_l1();

    return appf_platform_late_init(cluster);
}
Пример #2
0
/*
 * Entry point of the secure task: initialise, then service commands
 * posted by the non-secure side into the shared-memory mailbox
 * (secure_task_share_mem), yielding back after each poll.
 */
void secure_task(void)
{
	unsigned *pcommand =
	    (unsigned *)(&(secure_task_share_mem[TASK_COMMAND_OFFSET]));
	unsigned *response =
	    (unsigned *)(&(secure_task_share_mem[TASK_RESPONSE_OFFSET]));
	unsigned command;
	struct resume_param *presume;
	unsigned int state;

	/* init bss */
	bss_init();
	dbg_prints("secure task start!\n");

	/* suspend pwr ops init */
	suspend_pwr_ops_init();
	*pcommand = 0;

	while (1) {
		/* do secure task process */
		command = *pcommand;
		if (command) {
			dbg_print("process command ", command);
			if (command == SEC_TASK_GET_WAKEUP_SRC) {
				/* argument word follows the command word */
				state = *(pcommand+1);
				suspend_get_wakeup_source(
						(void *)response,  state);
				/*
				 * NOTE(review): unlike the suspend path below,
				 * *pcommand is not cleared here -- confirm the
				 * requester clears it, otherwise this command
				 * is reprocessed on every loop iteration.
				 */
			} else if (command == COMMAND_SUSPEND_ENTER) {
				state = *(pcommand+1);
				enter_suspend(state);
				/* ack the command and publish resume info */
				*pcommand = 0;
				*response = RESPONSE_SUSPEND_LEAVE;
				presume = (struct resume_param *)(response+1);
				presume->method = resume_data.method;
			}
		}
		__switch_back_highmb();
	}
}
Пример #3
0
/*
 * Service loop of the high-priority task: poll the shared-memory
 * mailbox for a command word, dispatch it, acknowledge, then yield.
 */
void high_task(void)
{
	unsigned *cmd_slot =
	    (unsigned *)(&(high_task_share_mem[TASK_COMMAND_OFFSET]));
	unsigned *rsp_slot =
	    (unsigned *)(&(high_task_share_mem[TASK_RESPONSE_OFFSET]));

	dbg_prints("high task start!\n");
	*cmd_slot = 0;

	for (;;) {
		unsigned cmd = *cmd_slot;

		if (cmd != 0) {
			process_high_task(cmd);
			/* ack: clear both mailbox words */
			*cmd_slot = 0;
			*rsp_slot = 0;
		}
		__switch_back_highmb();
	}
}
Пример #4
0
/*
 * Service loop of the low-priority task: poll the shared-memory
 * mailbox for a command word, dispatch it, acknowledge, then yield.
 */
void low_task(void)
{
	unsigned *cmd_slot =
	    (unsigned *)(&(low_task_share_mem[TASK_COMMAND_OFFSET]));
	unsigned *rsp_slot =
	    (unsigned *)(&(low_task_share_mem[TASK_RESPONSE_OFFSET]));

	*cmd_slot = 0;
	dbg_prints("low task start!\n");

	for (;;) {
		unsigned cmd = *cmd_slot;

		if (cmd != 0) {
			process_low_task(cmd);
			/* ack: clear both mailbox words */
			*cmd_slot = 0;
			*rsp_slot = 0;
		}
		__switch_back_lowmb();
	}
}
Пример #5
0
/**
 * This function is called when the OS makes a firmware call with the
 * function code APPF_POWER_DOWN_CPU.
 *
 * cstate: requested CPU power state (0-3); 1 = standby (WFI, context
 *         retained), >1 = context-losing power down.
 * rstate: requested cluster power state (0-3).
 * flags:  APPF_SAVE_* bits; hardware-required bits are OR-ed in below.
 *
 * Returns APPF_OK (or a platform rc) on standby exit / failed power-down,
 * APPF_BAD_CSTATE / APPF_BAD_RSTATE on invalid arguments. On a successful
 * context-losing power down, execution never returns here: the CPU loses
 * power inside the while(1) loop and restarts at the warm-reset vector.
 */
static int power_down_cpu(unsigned cstate, unsigned rstate, unsigned flags)
{
    struct appf_cpu *cpu;
    struct appf_cluster *cluster;
    int cpu_index, cluster_index;
    int i, rc, cluster_can_enter_cstate1;
    /* main_table must be addressed through its relocated physical address */
    struct appf_main_table* pmaintable = (struct appf_main_table*)reloc_addr((unsigned)&main_table);
#ifdef USE_REALVIEW_EB_RESETS
    int system_reset = FALSE, last_cpu = FALSE;
#endif
    cpu_index = appf_platform_get_cpu_index();
    cluster_index = appf_platform_get_cluster_index();
	 
    /* Locate this CPU's cluster descriptor and per-CPU descriptor */
    cluster = pmaintable->cluster_table;
    cluster += cluster_index;
	 	
    dbg_print("cluster:",cluster);
    
    cpu = cluster->cpu_table;
    cpu += cpu_index;   
   
    dbg_print("cpu:",cpu_index);
    dbg_print("cluster_index:",cluster_index);

    /* Validate arguments */
    if (cstate > 3)
    {
        return APPF_BAD_CSTATE;
    }
    if (rstate > 3)
    {
        return APPF_BAD_RSTATE;
    }
    /* If we're just entering standby mode, we don't mark the CPU as inactive */
    if (cstate == 1)
    {
        get_spinlock(cpu_index, cluster->context->lock);
        cpu->power_state = 1;
        
        /* See if we can make the cluster standby too */
        if (rstate == 1)
        {
            cluster_can_enter_cstate1 = TRUE;
            for(i=0; i<cluster->num_cpus; ++i)
            {
                if (cluster->cpu_table[i].power_state != 1)
                {
                    cluster_can_enter_cstate1 = FALSE;
                    break;
                }
            }
            if (cluster_can_enter_cstate1)
            {
                cluster->power_state = 1;
            }
        }
                
        rc = appf_platform_enter_cstate1(cpu_index, cpu, cluster);

        if (rc == 0)
        {
            /* Drop the lock while waiting so other CPUs can progress */
            release_spinlock(cpu_index, cluster->context->lock);
            dsb();
            wfi();
            get_spinlock(cpu_index, cluster->context->lock);
            rc = appf_platform_leave_cstate1(cpu_index, cpu, cluster);
        }
        
        /* Back from standby: mark CPU (and cluster) fully on again */
        cpu->power_state = 0;
        cluster->power_state = 0;
        release_spinlock(cpu_index, cluster->context->lock);
        return rc;
    }

    /* Ok, we're not just entering standby, so we are going to lose the context on this CPU */
		dbg_prints("step1\n");
	  get_spinlock(cpu_index, cluster->context->lock);
    --cluster->active_cpus;
		dbg_prints("step2\n");
		
    cpu->power_state = cstate;
    if (cluster->active_cpus == 0)
    {
        /* Last CPU going down takes the whole cluster to rstate */
        cluster->power_state = rstate;
#ifdef USE_REALVIEW_EB_RESETS
        /* last CPU down must not issue WFI, or we get stuck! */
        last_cpu = TRUE;
        if (rstate > 1)
        {
            system_reset = TRUE;
        }
#endif
    }
  
    /* add flags as required by hardware (e.g. APPF_SAVE_L2 if L2 is on) */
    flags |= cpu->context->flags;
    appf_platform_save_context(cluster, cpu, flags);
		
	dbg_prints("step3\n");
			

    /* Call the platform-specific shutdown code */
    rc = appf_platform_enter_cstate(cpu_index, cpu, cluster);
   
     /* Did the power down succeed? */
    if (rc == APPF_OK)
    {

        release_spinlock(cpu_index, cluster->context->lock);

        /* Wait for the power controller to cut power; we never execute
         * past the wfi() -- the next instruction fetched after power-up
         * is at the warm-reset vector. */
        while (1) 
        {
#if 0
#if defined(NO_PCU) || defined(USE_REALVIEW_EB_RESETS)
            extern void platform_reset_handler(unsigned, unsigned, unsigned, unsigned);
            void (*reset)(unsigned, unsigned, unsigned, unsigned) = platform_reset_handler;

#ifdef USE_REALVIEW_EB_RESETS
            /* Unlock system registers */
            *(volatile unsigned *)0x10000020 = 0xa05f;
            if (system_reset)
            {
                /* Tell the Realview EB to do a system reset */
                *(volatile unsigned *)0x10000040 = 6;
                /* goto reset vector! */
            }
            else
            {
                if (!last_cpu)
                {
                    /* Tell the Realview EB to put this CPU into reset */
                    *(volatile unsigned *)0x10000074 &= ~(1 << (6 + cpu_index));
                    /* goto reset vector! (when another CPU takes us out of reset) */
                }
            }
#endif
            /*
             * If we get here, either we are the last CPU, or the EB resets 
             * aren't present (e.g. Emulator). So, fake a reset: Turn off MMU, 
             * corrupt registers, wait for a while, jump to warm reset entry point
             */
            write_sctlr(read_sctlr() & ~0x10001807); /* clear TRE, I Z C M */
            dsb();
            for (i=0; i<10000; ++i)
            {
                __nop();
            }
            reset(0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef);
#endif
#endif

            dsb();    
            wfi(); /* This signals the power controller to cut the power */
            /* Next stop, reset vector! */
        }
    }
    else
    {
        /* Power down failed for some reason, return to the OS */
        appf_platform_restore_context(cluster, cpu);
        cpu->power_state = 0;
        cluster->power_state = 0;
        ++cluster->active_cpus;
        release_spinlock(cpu_index, cluster->context->lock);
    }
    return rc;
}
Пример #6
0
/**
 * This function saves all the context that will be lost
 * when a CPU and cluster enter a low power state.
 *
 * This function is called with cluster->context->lock held.
 *
 * cluster: descriptor of this CPU's cluster
 * cpu:     per-CPU descriptor of the calling CPU
 * flags:   APPF_SAVE_* bits selecting optional context to save
 *
 * Returns APPF_OK unconditionally (see TODO below about unchecked
 * return values from the GIC save helpers).
 */
int appf_platform_save_context(struct appf_cluster *cluster, struct appf_cpu *cpu, unsigned flags)
{
    appf_u32 saved_items = 0;
    appf_u32 cluster_saved_items = 0;
    struct appf_cpu_context *context = cpu->context;
    struct appf_cluster_context *cluster_context = cluster->context;
    int cluster_down;
    
    dbg_prints("save step 1\n");

    /* Save perf. monitors first, so we don't interfere too much with counts */
    if (flags & APPF_SAVE_PMU)
    {
        save_performance_monitors(context->pmu_data);
        saved_items |= SAVED_PMU;
    }
    dbg_prints("save step 2\n");

    if (flags & APPF_SAVE_TIMERS)
    {
        save_a9_timers(context->timer_data, cluster->scu_address);
        saved_items |= SAVED_TIMERS;
    }
    dbg_prints("save step 3\n");

    if (flags & APPF_SAVE_VFP)
    {
        save_vfp(context->vfp_data);
        saved_items |= SAVED_VFP;
    }
    
    dbg_prints("save step 4\n");
		
    /* GIC CPU interface and private (banked) distributor state, only if
     * an interrupt controller is present */
		if(cluster->ic_address)
    	save_gic_interface(context->gic_interface_data, cluster->ic_address);
    	
	   if(cluster->ic_address)	
    	save_gic_distributor_private(context->gic_dist_private_data, cluster->ic_address);
    /* TODO: check return value and quit if nonzero! */
    dbg_prints("save step 5\n");

    save_banked_registers(context->banked_registers);
    
    save_cp15(context->cp15_data);
    save_a9_other(context->other_data);
    
    if (flags & APPF_SAVE_DEBUG)
    {
        save_a9_debug(context->debug_data);
        saved_items |= SAVED_DEBUG;
    }
    dbg_prints("save step 6\n");
    cluster_down = cluster->power_state >= 2;

    /* NOTE(review): the cluster_down condition is commented out, so the
     * cluster-wide state below is saved unconditionally -- confirm this
     * is intended for this platform. */
  //  if (cluster_down)
    {
        /* Global timer exists on r1p0 and later (cpu_version >= 0x0100) */
        if ((flags & APPF_SAVE_TIMERS) && cluster->cpu_version >= 0x0100)
        {	
            save_a9_global_timer(cluster_context->global_timer_data, cluster->scu_address);
            cluster_saved_items |= SAVED_GLOBAL_TIMER;
        }

        save_gic_distributor_shared(cluster_context->gic_dist_shared_data, cluster->ic_address);
        
    }
 
    save_control_registers(context);
    save_mmu(context->mmu_data);
    context->saved_items = saved_items;
    dbg_prints("save step 7\n");
  //  if (cluster_down)
    {
    		if(cluster->scu_address)
        	save_a9_scu(cluster_context->scu_data, cluster->scu_address);

        if (flags & APPF_SAVE_L2)
        {
            save_pl310(cluster_context->l2_data, cluster->l2_address);
            cluster_saved_items |= SAVED_L2;
        }
        cluster_context->saved_items = cluster_saved_items;
    }
    dbg_prints("save step 8\n");

    /* 
     * DISABLE DATA CACHES
     *
     * First, disable, then clean+invalidate the L1 cache.
     *
     * Note that if L1 was to be dormant and we were the last CPU, we would only need to clean some key data
     * out of L1 and clean+invalidate the stack.
     */
    //asm volatile("mov r0,#0");
    //asm volatile("mcr p15, 0, r0, c7, c5, 0");

    		
		//disable_clean_inv_dcache_v7_l1();
	//v7_flush_dcache_all();

    /* 
     * Next, disable cache coherency
     */
    if (cluster->scu_address)
    {
        write_actlr(read_actlr() & ~A9_SMP_BIT);
    }
    dbg_prints("save step 9\n");

    /*
     * If the L2 cache is in use, there is still more to do.
     *
     * Note that if the L2 cache is not in use, we don't disable the MMU, as clearing the C bit is good enough.
     */
    if (flags & APPF_SAVE_L2)
    {
        /*
         * Disable the MMU (and the L2 cache if necessary), then clean+invalidate the stack in the L2.
         * This all has to be done one assembler function as we can't use the C stack during these operations.
         */
         
        /* Fixed: missing semicolon and "falg" typo in the debug message */
		dbg_print("flag=", flags);
        disable_clean_inv_cache_pl310(cluster->l2_address, appf_platform_get_stack_pointer() - STACK_SIZE, 
                                      STACK_SIZE, cluster_down);
  
        /*
         * We need to partially or fully clean the L2, because we will enter reset with cacheing disabled
         */
//        if (cluster_down)
        {
            /* Clean the whole thing */
            //clean_pl310(cluster->l2_address);
//			l2x0_flush_all();
        }
//        else
        {
            /* 
	     * L2 staying on, so just clean everything this CPU will need before the MMU is reenabled
	     *
             * TODO: some of this data won't change after boottime init, could be cleaned once during late_init
	     */
//            clean_range_pl310(cluster,           sizeof(struct appf_cluster),         cluster->l2_address);
//            clean_range_pl310(cpu,               sizeof(struct appf_cpu),             cluster->l2_address);
//            clean_range_pl310(context,           sizeof(struct appf_cpu_context),     cluster->l2_address);
//            clean_range_pl310(context->mmu_data, MMU_DATA_SIZE,                       cluster->l2_address);
//            clean_range_pl310(cluster_context,   sizeof(struct appf_cluster_context), cluster->l2_address);
        }
    }
    dbg_prints("save step 10\n");

    return APPF_OK;
}