Example #1
static int __init msm_cpu_prepare(unsigned int cpu)
{
	u64 mpidr_el1 = cpu_logical_map(cpu);

	if (scm_is_mc_boot_available()) {

		if (mpidr_el1 & ~MPIDR_HWID_BITMASK) {
			pr_err("CPU%d:Failed to set boot address\n", cpu);
			return -ENOSYS;
		}

		if (scm_set_boot_addr_mc(virt_to_phys(secondary_holding_pen),
				BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 0)),
				BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 1)),
				BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 2)),
				SCM_FLAG_COLDBOOT_MC)) {
			pr_warn("CPU%d:Failed to set boot address\n", cpu);
			return -ENOSYS;
		}

	} else {
		if (scm_set_boot_addr(virt_to_phys(secondary_holding_pen),
			cold_boot_flags[cpu])) {
			pr_warn("Failed to set CPU %u boot address\n", cpu);
			return -ENOSYS;
		}
	}

	/* Mark CPU0 cold boot flag as done */
	if (per_cpu(cold_boot_done, 0) == false)
		per_cpu(cold_boot_done, 0) = true;

	return 0;
}
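For a hypothetical CPU whose MPIDR is 0x101 (Aff0 = 1, Aff1 = 1, Aff2 = 0), the multi-cluster call above would pass BIT(1) = 0x2 as both the aff0 and aff1 masks and BIT(0) = 0x1 as the aff2 mask.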
Example #2
static void exynos_cpu_up(unsigned int cpu_id)
{
	unsigned int phys_cpu = cpu_logical_map(cpu_id);
	unsigned int core_config, core, cluster;
	void __iomem *addr;

	core = MPIDR_AFFINITY_LEVEL(phys_cpu, 0);
	cluster = MPIDR_AFFINITY_LEVEL(phys_cpu, 1);

	addr = EXYNOS_PMU_CPU_CONFIGURATION(core + (4 * cluster));
	core_config = __raw_readl(addr);

	if ((core_config & LOCAL_PWR_CFG) != LOCAL_PWR_CFG) {
#ifdef CONFIG_SOC_EXYNOS7420
		if ((core_config & LOCAL_PWR_CFG) == CPU_RESET_UP_CONFIG) {
			unsigned int tmp = __raw_readl(EXYNOS_PMU_CPU_STATUS(core + (4 * cluster)));
			if ((tmp & LOCAL_PWR_CFG) != LOCAL_PWR_CFG)
				panic("%s: Abnormal core status\n", __func__);
		}
#endif

		core_config |= LOCAL_PWR_CFG;
		__raw_writel(core_config, addr);
	}
}
Example #3
static int exynos5420_cpu_suspend(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	__raw_writel(0x0, sysram_base_addr + EXYNOS5420_CPU_STATE);

	if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);

		/*
		 * Residency value passed to mcpm_cpu_suspend back-end
		 * has to be given clear semantics. Set to 0 as a
		 * temporary value.
		 */
		mcpm_cpu_suspend(0);
	}

	pr_info("Failed to suspend the system\n");

	/* return value != 0 means failure */
	return 1;
}
Example #4
static int sunxi_cpu_power_down_c2state(struct cpuidle_device *dev,
					struct cpuidle_driver *drv,
					int index)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_pm_enter();
	//cpu_cluster_pm_enter();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	smp_wmb();

	cpu_suspend(CPUIDLE_FLAG_C2_STATE, sunxi_powerdown_c2_finisher);

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]++;
	sun8i_cluster_use_count[cluster]++;
	arch_spin_unlock(&sun8i_mcpm_lock);

	local_irq_enable();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	//cpu_cluster_pm_exit();
	cpu_pm_exit();

	return index;
}
Example #5
static void cpu_to_pcpu(unsigned int cpu,
			unsigned int *pcpu, unsigned int *pcluster)
{
	unsigned int mpidr;

	mpidr = cpu_logical_map(cpu);
	*pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	*pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
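A minimal usage sketch (the caller below is hypothetical, not part of the original source): translate a logical CPU number into the physical core/cluster pair that platform registers expect.

	unsigned int pcpu, pcluster;

	cpu_to_pcpu(2, &pcpu, &pcluster);	/* logical CPU 2, for example */
	/* pcpu and pcluster now hold Aff0 and Aff1 of that CPU's MPIDR */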
Example #6
static void mcpm_cpu_die(unsigned int cpu)
{
	unsigned int mpidr, pcpu, pcluster;
	mpidr = read_cpuid_mpidr();
	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	mcpm_set_entry_vector(pcpu, pcluster, NULL);
	mcpm_cpu_power_down();
}
Example #7
static int notrace mcpm_powerdown_finisher(unsigned long arg)
{
	u32 mpidr = read_cpuid_mpidr();
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	u32 this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	mcpm_set_entry_vector(cpu, this_cluster, cpu_resume);
	mcpm_cpu_suspend(arg);
	return 1;
}
Example #8
static void store_boot_cpu_info(void)
{
	unsigned int mpidr = read_cpuid_mpidr();

	boot_core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	boot_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_info("A booting CPU: core %d cluster %d\n", boot_core_id,
						       boot_cluster_id);
}
Example #9
/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace sunxi_powerdown_c2_finisher(unsigned long flg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	bool last_man = false;
	struct sunxi_enter_idle_para sunxi_idle_para;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]--;
	/* check whether this CPU is the last man and, if so, set the flag */
	sun8i_cluster_use_count[cluster]--;
	if (sun8i_cluster_use_count[cluster] == 0) {
		writel(1, CLUSTER_CPUX_FLG(cluster, cpu));
		last_man = true;
	}
	arch_spin_unlock(&sun8i_mcpm_lock);

	/* hand off to the arisc coprocessor to power the CPU down */
	sunxi_idle_para.flags = (unsigned long)mpidr | flg;
	sunxi_idle_para.resume_addr = (void *)(virt_to_phys(mcpm_entry_point));
	arisc_enter_cpuidle(NULL, NULL, &sunxi_idle_para);

	if (last_man) {
		int t = 0;

		/*
		 * Wait until the message has been received and answered,
		 * to reconfirm whether this CPU really is the last man,
		 * then clear the flag.
		 */
		while (1) {
			udelay(2);
			if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 2) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				break; /* last_man is true */
			} else if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 3) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				goto out; /* last_man is false */
			}
			if (++t > 5000) {
				printk(KERN_WARNING "cpu%d idle time out!\n",
				       cluster * 4 + cpu);
				t = 0;
			}
		}
		sunxi_idle_cluster_die(cluster);
	}
out:
	sunxi_idle_cpu_die();

	/* return value != 0 means failure */
	return 1;
}
Example #10
static inline int cpu_sw_idx_to_hw_idx(int cpu)
{
	uint32_t cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);

	if (cluster_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER) {
		WARN(1, "cluster_id = %d, cpu_id = %d are not valid.\n", cluster_id, cpu_id);
		return 0;
	}

	return (cluster_id * MAX_CPUS_PER_CLUSTER + cpu_id);
}
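For example, assuming MAX_CPUS_PER_CLUSTER is 4, a CPU with Aff1 = 1 and Aff0 = 2 maps to hardware index 1 * 4 + 2 = 6; the constant and the CPU values here are illustrative.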
Example #11
static void __init mmp_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	BUG_ON(cpu >= MAX_CPUS_PER_CLUSTER || cluster >= MAX_NR_CLUSTERS);
	memset(mmp_pm_use_count, 0, sizeof(mmp_pm_use_count));
	mmp_pm_use_count[cluster][cpu] = 1;
}
Example #12
unsigned int exynos7420_cpu_state(unsigned int cpu_id)
{
	unsigned int phys_cpu = cpu_logical_map(cpu_id);
	unsigned int core, cluster, val;

	core = MPIDR_AFFINITY_LEVEL(phys_cpu, 0);
	cluster = MPIDR_AFFINITY_LEVEL(phys_cpu, 1);

	val = __raw_readl(EXYNOS_ARM_CORE_STATUS(core + (4 * cluster)))
						& EXYNOS_CORE_PWR_EN;

	return val == 0xf;
}
Example #13
static int exynos_cpu_state(unsigned int cpu_id)
{
	unsigned int phys_cpu = cpu_logical_map(cpu_id);
	unsigned int core, cluster, val;

	core = MPIDR_AFFINITY_LEVEL(phys_cpu, 0);
	cluster = MPIDR_AFFINITY_LEVEL(phys_cpu, 1);

	val = __raw_readl(EXYNOS_PMU_CPU_STATUS(core + (4 * cluster)))
						& LOCAL_PWR_CFG;

	return val == LOCAL_PWR_CFG;
}
Example #14
static void __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
	       cpu >= vexpress_spc_get_nb_cpus(cluster));

	atomic_set(&tc2_pm_use_count[cpu][cluster], 1);
}
Example #15
void exynos7420_cpu_down(unsigned int cpu_id)
{
	unsigned int phys_cpu = cpu_logical_map(cpu_id);
	unsigned int tmp, core, cluster;
	void __iomem *addr;

	core = MPIDR_AFFINITY_LEVEL(phys_cpu, 0);
	cluster = MPIDR_AFFINITY_LEVEL(phys_cpu, 1);

	addr = EXYNOS_ARM_CORE_CONFIGURATION(core + (4 * cluster));

	tmp = __raw_readl(addr);
	tmp &= ~(EXYNOS_CORE_PWR_EN);
	__raw_writel(tmp, addr);
}
Example #16
static void msm_pm_write_boot_vector(unsigned int cpu, unsigned long address)
{
	uint32_t clust_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
	unsigned long *start_address;
	unsigned long *end_address;

	if (clust_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
		BUG();

	msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)] = address;
	start_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)];
	end_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id + 1)];
	dmac_clean_range((void *)start_address, (void *)end_address);
}
Example #17
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
Example #18
void exynos7420_secondary_up(unsigned int cpu_id)
{
	unsigned int phys_cpu = cpu_logical_map(cpu_id);
	unsigned int tmp, core, cluster;
	void __iomem *addr;

	core = MPIDR_AFFINITY_LEVEL(phys_cpu, 0);
	cluster = MPIDR_AFFINITY_LEVEL(phys_cpu, 1);

	addr = EXYNOS_ARM_CORE_CONFIGURATION(core + (4 * cluster));

	tmp = __raw_readl(addr);
	tmp |= EXYNOS_CORE_INIT_WAKEUP_FROM_LOWPWR | EXYNOS_CORE_PWR_EN;

	__raw_writel(tmp, addr);
}
Example #19
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}
Example #20
void clear_boot_flag(unsigned int cpu, unsigned int mode)
{
	unsigned int phys_cpu = cpu_logical_map(cpu);
	unsigned int tmp, core, cluster;
	void __iomem *addr;

	BUG_ON(mode == 0);

	core = MPIDR_AFFINITY_LEVEL(phys_cpu, 0);
	cluster = MPIDR_AFFINITY_LEVEL(phys_cpu, 1);
	addr = REG_CPU_STATE_ADDR + 4 * (core + cluster * 4);

	tmp = __raw_readl(addr);
	tmp &= ~mode;
	__raw_writel(tmp, addr);
}
Example #21
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
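The resume path applies these precomputed shifts in assembly; as a rough illustration only (this helper does not exist in the kernel source), the equivalent C computation would be:

static u32 mpidr_hash_index(u64 mpidr)
{
	u32 index = 0;
	int level;

	mpidr &= mpidr_hash.mask;	/* keep only bits that ever toggle */
	for (level = 0; level < 4; level++)
		index |= (mpidr & ((u64)0xff << MPIDR_LEVEL_SHIFT(level)))
				>> mpidr_hash.shift_aff[level];

	return index;	/* at most mpidr_hash.bits significant bits */
}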
Example #22
static void mmp_pm_powered_up(void)
{
	int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);

	cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);
#ifdef CONFIG_VOLDC_STAT
	vol_dcstat_event(VLSTAT_LPM_EXIT, 0, 0);
	vol_ledstatus_event(MAX_LPM_INDEX);
#endif
	trace_pxa_cpu_idle(LPM_EXIT(0), cpu, cluster);

	local_irq_save(flags);
	arch_spin_lock(&mmp_lpm_lock);

	if (cluster_is_idle(cluster)) {
		if (mmp_wake_saved && mmp_idle->ops->restore_wakeup) {
			mmp_wake_saved = 0;
			mmp_idle->ops->restore_wakeup();
		}
		/* Check whether the hardware really shut down the MP subsystem */
		if (!(readl_relaxed(regs_addr_get_va(REGS_ADDR_GIC_DIST) +
				    GIC_DIST_CTRL) & 0x1)) {
			pr_debug("%s: cpu%u: cluster%u is up!\n", __func__, cpu, cluster);
			cpu_cluster_pm_exit();
		}
	}

	if (!mmp_pm_use_count[cluster][cpu])
		mmp_pm_use_count[cluster][cpu] = 1;

	mmp_enter_lpm[cluster][cpu] = 0;

	if (mmp_idle->ops->clr_pmu)
		mmp_idle->ops->clr_pmu(cpu);

	arch_spin_unlock(&mmp_lpm_lock);
	local_irq_restore(flags);
}
Example #23
static void __init msm_platform_smp_prepare_cpus_mc(unsigned int max_cpus)
{
	int cpu, map;
	u32 aff0_mask = 0;
	u32 aff1_mask = 0;
	u32 aff2_mask = 0;

	for_each_present_cpu(cpu) {
		map = cpu_logical_map(cpu);
		aff0_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 0));
		aff1_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 1));
		aff2_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 2));
	}

	if (scm_set_boot_addr_mc(virt_to_phys(msm_secondary_startup),
		aff0_mask, aff1_mask, aff2_mask, SCM_FLAG_COLDBOOT_MC))
		pr_warn("Failed to set CPU boot address\n");
}
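On a hypothetical part with two clusters of four cores each (MPIDRs 0x0-0x3 and 0x100-0x103), the loop yields aff0_mask = 0xf, aff1_mask = 0x3 and aff2_mask = 0x1.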
Example #24
/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace bl_powerdown_finisher(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);

	/*
	 * Residency value passed to mcpm_cpu_suspend back-end
	 * has to be given clear semantics. Set to 0 as a
	 * temporary value.
	 */
	mcpm_cpu_suspend(0);

	/* return value != 0 means failure */
	return 1;
}
Example #25
static int exynos5420_cpu_suspend(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	writel_relaxed(0x0, sysram_base_addr + EXYNOS5420_CPU_STATE);

	if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
		mcpm_cpu_suspend();
	}

	pr_info("Failed to suspend the system\n");

	/* return value != 0 means failure */
	return 1;
}
Example #26
static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned int mpidr, pcpu, pcluster, ret;
	extern void secondary_startup(void);

	mpidr = cpu_logical_map(cpu);
	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
		 __func__, cpu, pcpu, pcluster);

	mcpm_set_entry_vector(pcpu, pcluster, NULL);
	ret = mcpm_cpu_power_up(pcpu, pcluster);
	if (ret)
		return ret;
	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	dsb_sev();
	return 0;
}
Example #27
static int __cpuinit msm8936_boot_secondary(unsigned int cpu,
						struct task_struct *idle)
{
	pr_debug("Starting secondary CPU %d\n", cpu);

	if (per_cpu(cold_boot_done, cpu) == false) {
		u32 mpidr = cpu_logical_map(cpu);
		u32 apcs_base = MPIDR_AFFINITY_LEVEL(mpidr, 1) ?
				0xb088000 : 0xb188000;
		if (of_board_is_sim())
			release_secondary_sim(apcs_base,
				MPIDR_AFFINITY_LEVEL(mpidr, 0));
		else if (!of_board_is_rumi())
			arm_release_secondary(apcs_base,
				MPIDR_AFFINITY_LEVEL(mpidr, 0));

		per_cpu(cold_boot_done, cpu) = true;
	}
	return release_from_pen(cpu);
}
Example #28
static int rk3288_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	void *sel = RK_CRU_VIRT + RK3288_CRU_CLKSELS_CON(36);
	u32 con = readl_relaxed(sel);
	u32 cpu = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 0);

	/*
	 * Rockchip CRU registers use their upper 16 bits as a
	 * write-enable mask, so 0x70007 writes 0x7 into this CPU's
	 * 3-bit field for the duration of the idle period.
	 */
	writel_relaxed(0x70007 << (cpu << 2), sel);
	cpu_do_idle();
	/* Write-enable the same field again and restore its saved value. */
	writel_relaxed((0x70000 << (cpu << 2)) | con, sel);
	dsb();
	return index;
}
Example #29
static int __init nocache_trampoline(unsigned long _arg)
{
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	phys_reset_t phys_reset;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}
Example #30
static void exynos5420_powerdown_conf(enum sys_powerdown mode)
{
	u32 this_cluster;

	this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);

	/*
	 * Write the cluster id to the IROM register to ensure that we
	 * wake up on the current cluster.
	 */
	pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
}