Example #1
static int exynos5420_cpu_suspend(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	__raw_writel(0x0, sysram_base_addr + EXYNOS5420_CPU_STATE);

	if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);

		/*
		 * Residency value passed to mcpm_cpu_suspend back-end
		 * has to be given clear semantics. Set to 0 as a
		 * temporary value.
		 */
		mcpm_cpu_suspend(0);
	}

	pr_info("Failed to suspend the system\n");

	/* return value != 0 means failure */
	return 1;
}
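For reference: MPIDR_AFFINITY_LEVEL(), used throughout these examples, extracts one 8-bit affinity field from the MPIDR register. A minimal sketch of the 32-bit ARM definition (approximately what arch/arm/include/asm/cputype.h provides; check your kernel version for the exact form):

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)

/* level 0 = CPU within cluster, level 1 = cluster, level 2 = higher-level grouping */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)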
Example #2
static int sunxi_cpu_power_down_c2state(struct cpuidle_device *dev,
                                               struct cpuidle_driver *drv,
                                               int index)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_pm_enter();
	//cpu_cluster_pm_enter();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	smp_wmb();

	cpu_suspend(CPUIDLE_FLAG_C2_STATE, sunxi_powerdown_c2_finisher);

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]++;
	sun8i_cluster_use_count[cluster]++;
	arch_spin_unlock(&sun8i_mcpm_lock);

	local_irq_enable();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	//cpu_cluster_pm_exit();
	cpu_pm_exit();

	return index;
}
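A rough sketch of how a handler like this is typically registered with the cpuidle framework; the state index, latencies and names below are placeholders, not values taken from the original sunxi driver:

#include <linux/cpuidle.h>

static struct cpuidle_driver sunxi_idle_driver_sketch = {	/* illustrative name */
	.name        = "sunxi_idle",
	/* .states[0] would normally be the plain WFI state */
	.states[1]   = {
		.enter            = sunxi_cpu_power_down_c2state,
		.exit_latency     = 300,	/* us, placeholder */
		.target_residency = 1000,	/* us, placeholder */
		.name             = "C2",
		.desc             = "CPU and cluster power down",
	},
	.state_count = 2,
};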
Example #3
int mcpm_cpu_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_was_down, first_man;
	unsigned long flags;

	if (!platform_ops)
		return -EUNATCH;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	local_irq_save(flags);
	arch_spin_lock(&mcpm_lock);

	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
	first_man = mcpm_cluster_unused(cluster);

	if (first_man && platform_ops->cluster_is_up)
		platform_ops->cluster_is_up(cluster);
	if (cpu_was_down)
		mcpm_cpu_use_count[cluster][cpu] = 1;
	if (platform_ops->cpu_is_up)
		platform_ops->cpu_is_up(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_restore(flags);

	return 0;
}
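The mcpm_cluster_unused() helper used above is not part of the snippet; a sketch consistent with the MCPM use-count bookkeeping (the real helper lives in arch/arm/common/mcpm_entry.c) could look like this:

static bool mcpm_cluster_unused(unsigned int cluster)
{
	int i, cnt;

	/* a cluster is unused when no CPU in it holds a use count */
	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		cnt |= mcpm_cpu_use_count[cluster][i];
	return !cnt;
}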
Example #4
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
Example #5
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}
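Example #5 is the arm64 variant of store_cpu_topology(); there read_cpuid_mpidr() boils down to a read of MPIDR_EL1. A stand-alone sketch of that read (illustrative helper, not the kernel's own accessor):

static inline u64 my_read_mpidr_el1(void)	/* illustrative name */
{
	u64 mpidr;

	asm volatile("mrs %0, mpidr_el1" : "=r" (mpidr));
	return mpidr;
}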
Example #6
/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;
	unsigned int cpu;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system
		 * multiprocessor format & multiprocessor mode field are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;
			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
				& MPIDR_LEVEL1_MASK;
			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
				& MPIDR_LEVEL2_MASK;
		} else {
Example #7
File: boot.c  Project: KGG814/AOS
/*
 * Entry point.
 *
 * Unpack images, setup the MMU, jump to the kernel.
 */
void main(void)
{
    int num_apps;
    int cpu_mode;

#ifdef CONFIG_SMP_ARM_MPCORE
    /* If not the boot strap processor then go to non boot main */
    if ( (read_cpuid_mpidr() & 0xf) != booting_cpu_id) {
        non_boot_main();
    }
#endif

    /* Print welcome message. */
    printf("\nELF-loader started on ");
    print_cpuid();
    platform_init();

    printf("  paddr=[%p..%p]\n", _start, _end - 1);

    /* Unpack ELF images into memory. */
    load_images(&kernel_info, &user_info, 1, &num_apps);
    if (num_apps != 1) {
        printf("No user images loaded!\n");
        abort();
    }

    /* Setup MMU. */
    cpu_mode = read_cpsr() & CPSR_MODE_MASK;
    if (cpu_mode == CPSR_MODE_HYPERVISOR) {
        printf("Enabling hypervisor MMU and paging\n");
        init_lpae_boot_pd(&kernel_info);
        arm_enable_hyp_mmu();
    }
    /* If we are in HYP mode, we enable the SV MMU and paging
     * just in case the kernel does not support hyp mode. */
    printf("Enabling MMU and paging\n");
    init_boot_pd(&kernel_info);
    arm_enable_mmu();

#ifdef CONFIG_SMP_ARM_MPCORE
    /* Bring up any other CPUs */
    init_cpus();
    non_boot_lock = 1;
#endif

    /* Enter kernel. */
    if (UART_PPTR < kernel_info.virt_region_start) {
        printf("Jumping to kernel-image entry point...\n\n");
    } else {
        /* Our serial port is no longer accessible */
    }
    ((init_kernel_t)kernel_info.virt_entry)(user_info.phys_region_start,
                                            user_info.phys_region_end, user_info.phys_virt_offset,
                                            user_info.virt_entry);

    /* We should never get here. */
    printf("Kernel returned back to the elf-loader.\n");
    abort();
}
Example #8
/*
 * exynos_set_core_flag - set the cluster id to IROM register
 *			  to ensure that we wake up with the
 *			  current cluster.
 */
static void exynos_set_core_flag(void)
{
	int cluster_id = (read_cpuid_mpidr() >> 8) & 0xf;

	if (cluster_id)
		__raw_writel(1, EXYNOS_IROM_DATA2);
	else
		__raw_writel(0, EXYNOS_IROM_DATA2);
}
Example #9
static void mcpm_cpu_die(unsigned int cpu)
{
	unsigned int mpidr, pcpu, pcluster;
	mpidr = read_cpuid_mpidr();
	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	mcpm_set_entry_vector(pcpu, pcluster, NULL);
	mcpm_cpu_power_down();
}
Example #10
static int notrace mcpm_powerdown_finisher(unsigned long arg)
{
	u32 mpidr = read_cpuid_mpidr();
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	u32 this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	mcpm_set_entry_vector(cpu, this_cluster, cpu_resume);
	mcpm_cpu_suspend(arg);
	return 1;
}
Example #11
static int notrace bl_powerdown_finisher(unsigned long arg)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = (mpidr >> 8) & 0xf;
	unsigned int cpu = mpidr & 0xf;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	mcpm_cpu_suspend(0);  /* 0 should be replaced with better value here */
	return 1;
}
Example #12
static void store_boot_cpu_info(void)
{
	unsigned int mpidr = read_cpuid_mpidr();

	boot_core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	boot_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_info("A booting CPU: core %d cluster %d\n", boot_core_id,
						       boot_cluster_id);
}
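A hypothetical consumer of the cached IDs above (function name and usage are assumptions, shown only to illustrate why the boot core/cluster are recorded): check whether the current CPU is the boot core.

static bool running_on_boot_cpu(void)	/* hypothetical helper */
{
	unsigned int mpidr = read_cpuid_mpidr();

	return MPIDR_AFFINITY_LEVEL(mpidr, 0) == boot_core_id &&
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) == boot_cluster_id;
}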
Example #13
/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace sunxi_powerdown_c2_finisher(unsigned long flg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	bool last_man = false;
	struct sunxi_enter_idle_para sunxi_idle_para;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]--;
	/* check whether this CPU is the last man; if so, set the flag */
	sun8i_cluster_use_count[cluster]--;
	if (sun8i_cluster_use_count[cluster] == 0) {
		writel(1, CLUSTER_CPUX_FLG(cluster, cpu));
		last_man = true;
	}
	arch_spin_unlock(&sun8i_mcpm_lock);

	/* request CPU power-off (handled via arisc_enter_cpuidle) */
	sunxi_idle_para.flags = (unsigned long)mpidr | flg;
	sunxi_idle_para.resume_addr = (void *)(virt_to_phys(mcpm_entry_point));
	arisc_enter_cpuidle(NULL, NULL, &sunxi_idle_para);

	if (last_man) {
		int t = 0;

		/*
		 * Wait until the remote side has received this message and
		 * responded, to reconfirm whether this CPU really is the
		 * last man, then clear the flag.
		 */
		while (1) {
			udelay(2);
			if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 2) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				break; /* last_man is true */
			} else if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 3) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				goto out; /* last_man is false */
			}
			if (++t > 5000) {
				printk(KERN_WARNING "cpu%d idle time out!\n",
				       cluster * 4 + cpu);
				t = 0;
			}
		}
		sunxi_idle_cluster_die(cluster);
	}
out:
	sunxi_idle_cpu_die();

	/* return value != 0 means failure */
	return 1;
}
Example #14
static void exynos5420_powerdown_conf(enum sys_powerdown mode)
{
	u32 this_cluster;

	this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);

	/*
	 * set the cluster id to IROM register to ensure that we wake
	 * up with the current cluster.
	 */
	pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
}
Example #15
static void __init mmp_pm_usage_count_init(void)
{
    unsigned int mpidr, cpu, cluster;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

    BUG_ON(cpu >= MAX_CPUS_PER_CLUSTER || cluster >= MAX_NR_CLUSTERS);
    memset(mmp_pm_use_count, 0, sizeof(mmp_pm_use_count));
    mmp_pm_use_count[cluster][cpu] = 1;
}
Example #16
static int rk3288_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	void *sel = RK_CRU_VIRT + RK3288_CRU_CLKSELS_CON(36);
	u32 con = readl_relaxed(sel);
	u32 cpu = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 0);
	writel_relaxed(0x70007 << (cpu << 2), sel);
	cpu_do_idle();
	writel_relaxed((0x70000 << (cpu << 2)) | con, sel);
	dsb();
	return index;
}
Example #17
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
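Once cpu_logical_map() has been seeded from the MPIDR, later SMP bring-up code can map a hardware ID back to a logical CPU number. A simplified sketch of such a reverse lookup (not verbatim kernel code):

static int mpidr_to_logical_cpu(u64 hwid)	/* illustrative helper */
{
	unsigned int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (cpu_logical_map(cpu) == hwid)
			return cpu;
	return -EINVAL;	/* no logical CPU has this MPIDR */
}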
Example #18
static void __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
	       cpu >= vexpress_spc_get_nb_cpus(cluster));

	atomic_set(&tc2_pm_use_count[cpu][cluster], 1);
}
Example #19
bool IsThereMailFromCore(uint32_t fromID)
{
    uint32_t coreID        = read_cpuid_mpidr() & 0x3;
    uint32_t mailboxSource = readl(__io_address(ARM_LOCAL_MAILBOX0_CLR0) + (coreID * 0x10));

    return (mailboxSource & (1 << fromID)) != 0;
}
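A hypothetical caller of the helper above (the wrapper name is invented), busy-waiting until a given core has posted mail:

void WaitForMailFromCore(uint32_t fromID)	/* illustrative only */
{
    while (!IsThereMailFromCore(fromID)) {
        /* busy-wait; real code would add WFE/WFI or a timeout */
    }
}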
Example #20
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}
Example #21
void SendDoorBellToCore(uint32_t coreNumber)
{
    uint32_t coreID;

    //
    // Ensure that stores to Normal memory are visible to the
    // other CPUs before issuing the IPI.
    //
    dsb();

    //
    // Cause the doorbell interrupt on the remote core.
    //
    coreID  = read_cpuid_mpidr() & 0x3;    /* use only the Aff0 (core number) field */
    writel(1 << coreID, __io_address(ARM_LOCAL_MAILBOX0_SET0 + (0x10 * coreNumber)));
}
Example #22
static int exynos_power_up_cpu(unsigned int cpu)
{
	unsigned int timeout;
	unsigned int val;
	void __iomem *power_base;
	unsigned int cluster = (read_cpuid_mpidr() >> 8) & 0xf;

	power_base = cpu_boot_info[cpu].power_base;
	if (power_base == 0)
		return -EPERM;

	val = __raw_readl(power_base + 0x4);
	if (!(val & EXYNOS_CORE_LOCAL_PWR_EN)) {
		__raw_writel(EXYNOS_CORE_LOCAL_PWR_EN, power_base);

		/* wait max 10 ms until cpu is on */
		timeout = 10;
		while (timeout) {
			val = __raw_readl(power_base + 0x4);

			if ((val & EXYNOS_CORE_LOCAL_PWR_EN) ==
			     EXYNOS_CORE_LOCAL_PWR_EN)
				break;

			mdelay(1);
			timeout--;
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu%d power up failed\n", cpu);
			return -ETIMEDOUT;
		}
	}

	if (cluster) {
		while (!__raw_readl(EXYNOS_PMU_SPARE2))
			udelay(10);

		udelay(10);

		printk(KERN_DEBUG "cpu%d: SWRESET\n", cpu);
		val = ((1 << 20) | (1 << 8)) << cpu;
		__raw_writel(val, EXYNOS_SWRESET);
	}

	return 0;
}
Example #23
static int exynos5420_pm_suspend(void)
{
	u32 this_cluster;

	exynos_pm_central_suspend();

	/* Setting SEQ_OPTION register */

	this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
	if (!this_cluster)
		pmu_raw_writel(EXYNOS5420_ARM_USE_STANDBY_WFI0,
				S5P_CENTRAL_SEQ_OPTION);
	else
		pmu_raw_writel(EXYNOS5420_KFC_USE_STANDBY_WFI0,
				S5P_CENTRAL_SEQ_OPTION);
	return 0;
}
Example #24
static void mmp_pm_powered_up(void)
{
    unsigned int mpidr, cpu, cluster;
    unsigned long flags;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);

    cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);
#ifdef CONFIG_VOLDC_STAT
    vol_dcstat_event(VLSTAT_LPM_EXIT, 0, 0);
    vol_ledstatus_event(MAX_LPM_INDEX);
#endif
    trace_pxa_cpu_idle(LPM_EXIT(0), cpu, cluster);

    local_irq_save(flags);
    arch_spin_lock(&mmp_lpm_lock);

    if (cluster_is_idle(cluster)) {
        if (mmp_wake_saved && mmp_idle->ops->restore_wakeup) {
            mmp_wake_saved = 0;
            mmp_idle->ops->restore_wakeup();
        }
        /* If hardware really shutdown MP subsystem */
        if (!(readl_relaxed(regs_addr_get_va(REGS_ADDR_GIC_DIST) +
                            GIC_DIST_CTRL) & 0x1)) {
            pr_debug("%s: cpu%u: cluster%u is up!\n", __func__, cpu, cluster);
            cpu_cluster_pm_exit();
        }
    }

    if (!mmp_pm_use_count[cluster][cpu])
        mmp_pm_use_count[cluster][cpu] = 1;

    mmp_enter_lpm[cluster][cpu] = 0;

    if (mmp_idle->ops->clr_pmu)
        mmp_idle->ops->clr_pmu(cpu);

    arch_spin_unlock(&mmp_lpm_lock);
    local_irq_restore(flags);
}
Example #25
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (soc_is_exynos4210() || soc_is_exynos4212() ||
			soc_is_exynos5250() || soc_is_exynos3250())
		ncores = 2;
	else if (soc_is_exynos4412() || soc_is_exynos5410()
		|| soc_is_exynos4415() || soc_is_exynos3470())
		ncores = 4;
	else if (soc_is_exynos5260())
#ifdef CONFIG_EXYNOS5_MP
		ncores = NR_CPUS;
#else
		ncores = read_cpuid_mpidr() & 0x100 ? 4 : 2;
#endif
	else if (soc_is_exynos5420())
Example #26
static int exynos5420_cpu_suspend(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	writel_relaxed(0x0, sysram_base_addr + EXYNOS5420_CPU_STATE);

	if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
		mcpm_cpu_suspend();
	}

	pr_info("Failed to suspend the system\n");

	/* return value != 0 means failure */
	return 1;
}
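On 32-bit ARM a finisher like this is normally handed to cpu_suspend(), which saves the core context and only returns here if the power-down did not happen. A hedged sketch of such a call site (the wrapper name is invented):

static int exynos5420_enter_lowpower(void)	/* illustrative wrapper */
{
	/* non-zero means the finisher returned, i.e. the suspend failed */
	return cpu_suspend(0, exynos5420_cpu_suspend);
}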
Example #27
/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace bl_powerdown_finisher(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);

	/*
	 * Residency value passed to mcpm_cpu_suspend back-end
	 * has to be given clear semantics. Set to 0 as a
	 * temporary value.
	 */
	mcpm_cpu_suspend(0);

	/* return value != 0 means failure */
	return 1;
}
Example #28
static int __init nocache_trampoline(unsigned long _arg)
{
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	phys_reset_t phys_reset;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}
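The trampoline above must run with caches off, so it is driven through cpu_suspend() in the style of the MCPM loopback test; the sketch below is approximate and the function name is invented:

int __init mcpm_loopback_sketch(void (*cache_disable)(void))
{
	int ret;

	local_irq_disable();
	local_fiq_disable();
	ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}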
Example #29
/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
static int __init parse_acpi_topology(void)
{
	bool is_threaded;
	int cpu, topology_id;

	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (is_threaded) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id   = topology_id;
		} else {
			cpu_topology[cpu].thread_id  = -1;
			cpu_topology[cpu].core_id    = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
Example #30
static void tc2_pm_psci_power_down(void)
{
	struct psci_power_state power_state;
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	BUG_ON(!psci_ops.cpu_off);

	switch (atomic_dec_return(&tc2_pm_use_count[cpu][cluster])) {
	case 1:
		/*
		 * Overtaken by a power up. Flush caches, exit coherency,
		 * return & fake a reset
		 */
		set_cr(get_cr() & ~CR_C);

		flush_cache_louis();

		asm volatile ("clrex");
		set_auxcr(get_auxcr() & ~(1 << 6));

		return;
	case 0:
		/* A normal request to possibly power down the cluster */
		power_state.id = PSCI_POWER_STATE_ID;
		power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
		power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;

		psci_ops.cpu_off(power_state);

		/* On success this function never returns */
	default:
		/* Any other value is a bug */
		BUG();
	}
}