Example #1
0
void cpu_start_restore(void)
{
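    /*
     * Entered shortly after reset with the MMU and data cache still off
     * (see platform_restore_context below), so globals are dereferenced
     * through their physical addresses via __pa().
     */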
    struct cpu_cluster *cc = (struct cpu_cluster *)__pa(&cpu_cluster);
    struct cpu *cp = (struct cpu *)__pa(&cpu);
    int *chip_ver = (int *)__pa(&e1_chip);

#if 0 //wschen 2011-08-12
    extern int armV7_perf_mon_is_overflow(unsigned int n);
    extern unsigned int armV7_perf_mon_get_cyc_cnt(void);
    extern void armV7_perf_mon_reset(void);
    extern void armV7_perf_mon_enable(unsigned int n);

    armV7_perf_mon_enable(0);
    armV7_perf_mon_reset();
    armV7_perf_mon_enable(1);
#endif

    if (*chip_ver) {
        volatile int i;

        /* Brief busy-wait around the SRAM power writes (settling time, presumably). */
        for (i = 0; i < DELAY_COUNT; i++) {
            nop();
        }

        if (cc->power_state == STATUS_DORMANT) {
            reg_write(IO_VIRT_TO_PHYS(SC_PWR_CON0), 0xE125); /* SRAM wake up */
        } else {
            reg_write(IO_VIRT_TO_PHYS(SC_PWR_CON0), 0xC125); /* SRAM power up */
        }

        dsb();

        for (i = 0; i < DELAY_COUNT; i++) {
            nop();
        }

        if (cc->power_state == STATUS_DORMANT) {
            reg_write(IO_VIRT_TO_PHYS(SC_PWR_CON2), 0xE525); /* SRAM wake up */
        } else {
            reg_write(IO_VIRT_TO_PHYS(SC_PWR_CON2), 0xC525); /* SRAM power up */
        }

        dsb();

        for (i = 0; i < DELAY_COUNT; i++) {
            nop();
        }
    }

    platform_restore_context(cc, cp);
}
Example #2
0
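    /*
     * l2x0_init(base, aux_val, aux_mask) brings up the PL310 L2 cache
     * controller, merging aux_val into its auxiliary control register
     * under aux_mask; the value/mask pair below is platform-specific.
     */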
    l2x0_init((void __iomem *)PL310_BASE, 0x70000000, 0x8FFFFFFF);
#endif  /* CONFIG_CACHE_L2X0 */

#if defined(CONFIG_HAVE_ARM_SCU)
    scu_enable((void *)SCU_BASE);

    /* set INFRA_ACP to 0x00003333 for receiving transactions to ACP */
    writel(0x00003333, INFRA_SYS_CFG_BASE + 0x0F04);
#endif  /* CONFIG_HAVE_ARM_SCU */
}

static struct map_desc mt6575_io_desc[] __initdata = 
{
    {
        .virtual = AP_RGU_BASE,
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(AP_RGU_BASE)),
        .length = SZ_16M,
        .type = MT_DEVICE
    },
    {
        .virtual = PERICFG_BASE,
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(PERICFG_BASE)),
        .length = SZ_16M,
        .type = MT_DEVICE
    },
    {
        .virtual = MMSYS1_CONFIG_BASE,
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(MMSYS1_CONFIG_BASE)),
        .length = SZ_16M,
        .type = MT_DEVICE
    },
Example #3
0
    int ret;

    /* Pin the 4 KiB page at BOOT_SHARE_BASE so bootmem never hands it out. */
    ret = reserve_bootmem(__pa(BOOT_SHARE_BASE), 0x1000, BOOTMEM_EXCLUSIVE);
    if (ret < 0)
    {
        printk(KERN_WARNING "reserve_bootmem BOOT_SHARE_BASE failed %d\n", ret);
    }    
}
#endif

static struct map_desc mt_io_desc[] __initdata = 
{
#if !defined(CONFIG_MT6582_FPGA)
    {
        .virtual = INFRA_BASE,
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(INFRA_BASE)),
        .length = (SZ_1M - SZ_4K),
        .type = MT_DEVICE
    },
    /* Skip the mapping of 0xF0130000~0xF013FFFF to protect access from APMCU */
    {
        .virtual = (DEBUGTOP_BASE - SZ_4K),
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS((DEBUGTOP_BASE - SZ_4K))),
        .length = (0x30000 + SZ_4K),
        .type = MT_DEVICE
    },        
    {
        .virtual = (DEBUGTOP_BASE + 0x40000),
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(DEBUGTOP_BASE + 0x40000)),
        .length = 0xC0000,
        .type = MT_DEVICE
Example #4
0
static struct mali_gpu_device_data mali_gpu_data =
{
    // System memory
    .shared_mem_size = 1024 * 1024 * 1024, /* 1GB */
    // Framebuffer physical address, only for validation usage
    .fb_start = 0x80000000,
    .fb_size  = 0x80000000,
    // DVFS
    .utilization_interval = 200, /* ms */
    .utilization_callback = mali_pmm_utilization_handler /*<utilization function>,*/
};

static struct resource mali_gpu_resources[] =
{
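    /*
     * MALI_GPU_RESOURCES_MALI400_MP1(base, gp_irq, gpmmu_irq, pp0_irq,
     * ppmmu0_irq) expands to the MMIO region plus the four interrupt
     * resources of a single-PP Mali-400; the role of each MT_MFG*_IRQ_ID
     * here is inferred from the commented MP1 block in Example #8.
     */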
    MALI_GPU_RESOURCES_MALI400_MP1(
                    IO_VIRT_TO_PHYS(MALI_BASE),
                    MT_MFG0_IRQ_ID,
                    MT_MFG1_IRQ_ID,
                    MT_MFG2_IRQ_ID,
                    MT_MFG3_IRQ_ID
                )
};

static struct dev_pm_ops mali_gpu_device_type_pm_ops =
{
    .suspend = mali_pm_suspend,
    .resume  = mali_pm_resume,
    .freeze  = mali_pm_suspend,
    .thaw    = mali_pm_resume,
    .restore = mali_pm_resume,
};
Example #5
0
extern void mt_power_off(void);
extern void mt_fixup(struct tag *tags, char **cmdline, struct meminfo *mi);
extern void mt_reserve(void);

static void __init mt_init(void)
{
    pm_power_off = mt_power_off;
    panic_on_oops = 1;
}

static struct map_desc mt_io_desc[] __initdata = 
{
    /* MM Subsys */
    {
        .virtual = MMSYS_CONFIG_BASE,
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(MMSYS_CONFIG_BASE)),
        .length = SZ_128K,
        .type = MT_DEVICE
    },

    /* G3D Sys */
    {
        .virtual = G3D_CONFIG_BASE,
        .pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(G3D_CONFIG_BASE)),
        .length = SZ_128K,
        .type = MT_DEVICE
    },

    /* Perisys */
    {
        .virtual = AP_DMA_BASE,
Example #6
0
/**
 * This function restores all the context that was lost
 * when a CPU and cluster entered a low power state. It is called shortly after
 * reset, with the MMU and data cache off.
 *
 */
static void platform_restore_context(struct cpu_cluster *cluster, struct cpu *pcpu)
{
    struct cpu_context *context;
    struct cpu_cluster_context *cluster_context;
#if MAX_CPUS != 1
    int cluster_init;
#endif
    int *chip_ver = (int *)__pa(&e1_chip);

    /*
     * At this point we may not write to any data, and we may
     * only read the data that we explicitly cleaned from the L2 above.
     */
    cluster_context = &(cluster->context);
    context = &(pcpu->context);

    restore_cp15(context->cp15_data);

    /* Should we initialize the cluster: are we the first CPU back on, and has the cluster been off? */
#if MAX_CPUS != 1
    cluster_init = (cluster->active_cpus == 0 && cluster->power_state >= STATUS_DORMANT);
#endif


    /* First set up the SCU & L2, if necessary */
#if MAX_CPUS != 1
    if (cluster_init) {
#endif
        restore_a9_scu(cluster_context->scu_data, IO_VIRT_TO_PHYS(cluster->scu_address));

        restore_pl310(cluster_context->l2_data, IO_VIRT_TO_PHYS(cluster->l2_address), cluster->power_state == STATUS_DORMANT);
#if MAX_CPUS != 1
    }
#endif


    /* Next get the MMU back on */
    restore_mmu(context->mmu_data);


    if (cluster->power_state == STATUS_SHUTDOWN) {
        invalidate_icache_v7();
        invalidate_dcache_v7();
    } else {
        // E2 L1 cache still has a HW issue
        if (*chip_ver != 1) {
            invalidate_icache_v7();
            invalidate_dcache_v7();
        }
    }

    restore_control_registers(context);

    // Now the MMU is restored: welcome back to the virtual world
    isb();
    dsb();


    enable_pl310(cpu_cluster.l2_address);
    enable_cache_v7_l1();

    /*
     * MMU, L1 and L2 caches are on; we may now read/write any data.
     * Now we need to restore the rest of this CPU's context.
     */

#if 0 //wschen 2011-07-28
    /* Get the debug registers restored, so we can debug most of the code sensibly! */
    restore_a9_debug(cpu.context.debug_data);
#endif

    /* Restore shared items if necessary */
#if MAX_CPUS != 1
    if ((cpu_cluster.active_cpus == 0) && (cpu_cluster.power_state >= STATUS_DORMANT)) {
#endif
        restore_gic_distributor_shared((u32 *)cpu_cluster.context.gic_dist_shared_data, cpu_cluster.ic_address);
        restore_a9_global_timer((u32 *)cpu_cluster.context.global_timer_data, cpu_cluster.scu_address);
#if MAX_CPUS != 1
    }
#endif

    restore_gic_distributor_private((u32 *)cpu.context.gic_dist_private_data, cpu_cluster.ic_address);
    restore_gic_interface((u32 *)cpu.context.gic_interface_data, cpu.ic_address);

    restore_a9_other((u32 *)cpu.context.other_data);

    restore_vfp((u32 *)cpu.context.vfp_data);

    restore_a9_timers((u32 *)cpu.context.timer_data, cpu_cluster.scu_address);

    restore_performance_monitors((u32 *)cpu.context.pmu_data);

    /* Flag that we came back via the dormant/low-power restore path. */
    dormant_ret_flag = 1;

    restore_banked_registers((u32 *)cpu.context.banked_registers);
}
Example #7
0
	    model = of_get_flat_dt_prop(dt_root, "model", NULL);
	    if (!model)
		model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	    if (!model)
		model = "<unknown>";
	    strcpy(mdesc->name, model);
	    pr_info("mdesc->name=%s\n", mdesc->name);
	}
    }
}

static struct map_desc mt_io_desc[] __initdata =
{
	{
		.virtual = CKSYS_BASE,
		.pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(CKSYS_BASE)),
		.length = SZ_4K * 19,
		.type = MT_DEVICE,
	},
	{
		.virtual = MCUCFG_BASE,
		.pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(MCUCFG_BASE)),
		.length = SZ_4K * 26,
		.type = MT_DEVICE
	},
	{
		.virtual = CA9_BASE,
		.pfn = __phys_to_pfn(IO_VIRT_TO_PHYS(CA9_BASE)),
		.length = SZ_32K,
		.type = MT_DEVICE
	},
Example #8
0
{
    // System memory
    .shared_mem_size = 512 * 1024 * 1024, /* 512 MB */
    // Framebuffer physical address, only for validation usage
    .fb_start = 0x80000000,
    .fb_size  = 0x80000000,
    // DVFS
    .control_interval   = 8, /* ms */
    .utilization_callback   = mali_pmm_utilization_handler, /*<utilization function>,*/
};


static struct resource mali_gpu_resources_mp1[] =
{
    MALI_GPU_RESOURCES_MALI400_MP1(
                    IO_VIRT_TO_PHYS(0xF3010000),
                    176, //MT_MFG_IRQ_GP_ID,
                    177, //MT_MFG_IRQ_GPMMU_ID ,
                    178, //MT_MFG_IRQ_PP0_ID,
                    179  //MT_MFG_IRQ_PPMMU0_ID
                )
};

static struct resource mali_gpu_resources_mp2[] =
{
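    /*
     * The MP2 variant takes the same base address and GP/GPMMU/PP0/PPMMU0
     * interrupts as MP1, followed by the PP1/PPMMU1 interrupt pair for the
     * second pixel processor.
     */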
    MALI_GPU_RESOURCES_MALI400_MP2(
                    IO_VIRT_TO_PHYS(0xF3010000),
                    176, //MT_MFG_IRQ_GP_ID,
                    177, //MT_MFG_IRQ_GPMMU_ID ,
                    178, //MT_MFG_IRQ_PP0_ID,
                    179, //MT_MFG_IRQ_PPMMU0_ID,