/*
 * Enable the SCU
 */
void __init scu_enable(void __iomem *scu_base)
{
	u32 scu_ctrl;

	/* added by hkou 02/22/2012 */
#ifdef CONFIG_ARM_ERRATA_764369
	/* Cortex-A9 only */
	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
		/* ARM erratum 764369 workaround: set bit 0 of the SCU
		 * diagnostic control register (offset 0x30). */
		scu_ctrl = __raw_readl(scu_base + 0x30);
		if (!(scu_ctrl & 1))
			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
	}
#endif

	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1)
		return;

	/* updated by hkou & ted 02/22/2012 */
	/* 0x29 = SCU enable (bit 0) | speculative linefills (bit 3) |
	 * SCU standby enable (bit 5) */
	scu_ctrl |= 0x29;
	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	flush_cache_all();
}
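The Cortex-A9 check above masks the Main ID Register (MIDR) down to its implementer, architecture, and part-number fields, ignoring variant and revision. A minimal sketch of that decoding, assuming the MIDR layout from the ARM Architecture Reference Manual (the helper name is hypothetical):

#include <stdint.h>

/* MIDR: implementer [31:24], variant [23:20], architecture [19:16],
 * part number [15:4], revision [3:0] */
static inline int is_cortex_a9(uint32_t midr)
{
	/* 0xff0ffff0 drops variant and revision; 0x41 = ARM Ltd.,
	 * 0xf = CPUID scheme, 0xc09 = Cortex-A9 */
	return (midr & 0xff0ffff0) == 0x410fc090;
}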
Example #2
/*
 * Enable the SCU
 */
void __init scu_enable(void __iomem *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/* Cortex-A9 only */
	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
		scu_ctrl = __raw_readl(scu_base + 0x30);
		if (!(scu_ctrl & 1))
			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
	}
#endif

	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1)
		return;

#ifdef CONFIG_MESON6_SMP_HOTPLUG
	/* Also set SCU standby (bit 5) and IC standby (bit 6) enables */
	scu_ctrl |= 1 | (3 << 5);
#else
	scu_ctrl |= 1;
#endif
	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	flush_cache_all();
}
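For reference, these examples toggle the following Cortex-A9 SCU control register bits (layout per the Cortex-A9 MPCore TRM; the macro names below are hypothetical, since the kernel writes the raw values):

#define SCU_CTRL_ENABLE		(1 << 0)	/* SCU enable */
#define SCU_CTRL_SPEC_LINEFILL	(1 << 3)	/* SCU speculative linefills enable */
#define SCU_CTRL_STANDBY	(1 << 5)	/* SCU standby enable */
#define SCU_CTRL_IC_STANDBY	(1 << 6)	/* IC standby enable */

With these names, example #1's 0x29 is ENABLE | SPEC_LINEFILL | STANDBY, and the Meson hotplug variant's 1 | (3 << 5) is ENABLE | STANDBY | IC_STANDBY.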
Example #3
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init omap4_smp_init_cpus(void)
{
	unsigned int i = 0, ncores = 1, cpu_id;

	/* Use ARM cpuid check here, as SoC detection will not work so early */
	cpu_id = read_cpuid(CPUID_ID) & CPU_MASK;
	if (cpu_id == CPU_CORTEX_A9) {
		/*
		 * Currently we can't call ioremap here because
		 * SoC detection won't work until after init_early.
		 */
		scu_base =  OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
		BUG_ON(!scu_base);
		ncores = scu_get_core_count(scu_base);
	} else if (cpu_id == CPU_CORTEX_A15) {
		ncores = OMAP5_CORE_COUNT;
	}

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
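The scu_get_core_count() call above is a single register read; the kernel's version in arch/arm/kernel/smp_scu.c is essentially the sketch below (SCU configuration register layout per the Cortex-A9 MPCore TRM):

/* SCU configuration register: bits [1:0] = number of CPUs minus one */
#define SCU_CONFIG	0x04

static unsigned int __init sketch_scu_core_count(void __iomem *scu_base)
{
	return (__raw_readl(scu_base + SCU_CONFIG) & 0x03) + 1;
}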
Example #4
/*
 * Enable the SCU
 */
void scu_enable(void *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/*
	 * This code is mostly for TEGRA 2 and 3 processors.
	 * It is not enabled or tested on Xvisor for now.
	 * We keep it as we might have to enable it someday.
	 */
	/* Cortex-A9 only */
	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {

		scu_ctrl = vmm_readl(scu_base + 0x30);
		if (!(scu_ctrl & 1)) {
			vmm_writel(scu_ctrl | 0x1, scu_base + 0x30);
		}
	}
#endif

	scu_ctrl = vmm_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1) {
		return;
	}

	scu_ctrl |= 1;
	vmm_writel(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	vmm_flush_cache_all();
}
Example #5
    Platform* PlatformFactory::platform(const std::string &description)
    {
        int platform_id;
        bool is_found = false;
        Platform *result = NULL;
        platform_id = read_cpuid();
        for (auto it = platforms.begin(); it != platforms.end(); ++it) {
            if ((*it) != NULL && (*it)->model_supported(platform_id, description)) {
                result = (*it);
                break;
            }
        }
        for (auto it = platform_imps.begin(); it != platform_imps.end(); ++it) {
            if ((*it) != NULL && result != NULL &&
                (*it)->model_supported(platform_id)) {
                result->set_implementation((*it));
                is_found = true;
                break;
            }
        }
        if (!is_found) {
            result = NULL;
        }
        if (!result) {
            // If we get here, no acceptable platform was found
            throw Exception("cpuid: " + std::to_string(platform_id), GEOPM_ERROR_PLATFORM_UNSUPPORTED, __FILE__, __LINE__);
        }

        return result;
    }
Example #6
File: reset.c Project: ammubhave/bargud
static bool cpu_has_32bit_el1(void)
{
	u64 pfr0;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	return !!(pfr0 & 0x20);
}
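The pfr0 & 0x20 test is a single-bit shortcut for a 4-bit field check. Spelled out explicitly, assuming the ID_AA64PFR0_EL1 layout from the ARMv8 ARM and that only the architected values 0x0-0x2 occur in the field (the helper name is hypothetical):

static bool sketch_has_32bit_el1(u64 pfr0)
{
	/* ID_AA64PFR0_EL1.EL1, bits [7:4]: 0x1 = AArch64 only,
	 * 0x2 = AArch64 and AArch32 supported at EL1. */
	return ((pfr0 >> 4) & 0xf) == 0x2;
}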
Example #7
File: smp_scu.c Project: Goodzila/m040
/*
 * Enable the SCU
 */
void scu_enable(void __iomem *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/* Cortex-A9 only */
	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
		scu_ctrl = __raw_readl(scu_base + 0x30);
		if (!(scu_ctrl & 1))
			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
	}
#endif

	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1)
		return;

	/*
	 * Enabling speculative linefills (bit 3) is a worthwhile tweak:
	 * it measurably reduces L2 access latency.
	 */
#ifdef CONFIG_ARCH_EXYNOS4
	scu_ctrl |= (1 << 3);
#endif

	scu_ctrl |= 1;
	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	flush_cache_all();
}
Example #8
/*
 * Enable the SCU
 */
void scu_enable(void __iomem *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/* Cortex-A9 only */
	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
		scu_ctrl = __raw_readl(scu_base + 0x30);
		if (!(scu_ctrl & 1))
			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
	}
#endif

	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1)
		return;

	/* Enable speculative linefills (bit 3) on these Exynos revisions */
	if ((soc_is_exynos4412() && (samsung_rev() >= EXYNOS4412_REV_1_0)) ||
	    soc_is_exynos4210())
		scu_ctrl |= (1 << 3);

	scu_ctrl |= 1;
	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	flush_cache_all();

#ifdef CONFIG_MACH_PX
	logbuf_force_unlock();
#endif
}
Example #9
static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 reg_value;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc.S
	 */
	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, "aarch64");
	elf_hwcap = 0;

	/* Read the number of ASID bits: ID_AA64MMFR0_EL1.ASIDBits, bits [7:4]
	 * (0x0 = 8-bit ASIDs, 0x2 = 16-bit ASIDs) */
	reg_value = read_cpuid(ID_AA64MMFR0_EL1) & 0xf0;
	if (reg_value == 0x00)
		max_asid_bits = 8;
	else if (reg_value == 0x20)
		max_asid_bits = 16;
	else
		BUG();	/* unexpected ASIDBits encoding */
	cpu_last_asid = 1 << max_asid_bits;
}
Example #10
int main(int argc, char** argv)
{
    int rank;
    int cpu_id;
    double timeout;
    int rank_per_node = 0;
    geopm::PlatformImp *plat = NULL;
    geopm::ProfileSampler *sampler = NULL;
    geopm_time_s start, stop;
    std::vector<std::pair<uint64_t, struct geopm_prof_message_s> > sample;
    size_t sample_length;
    const double LOOP_TIMEOUT = 8E-6; // 8 us loop (value is in seconds)

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (!rank) {
        // TODO: wrap this in a PlatformImp factory.
        cpu_id = read_cpuid();
        if (cpu_id == 0x62D || cpu_id == 0x63E) {
            // family 6, models 0x2D/0x3E: Sandy Bridge-EP / Ivy Bridge-EP
            plat = (geopm::PlatformImp *)(new geopm::IVTPlatformImp);
        }
        else if (cpu_id == 0x63F) {
            // family 6, model 0x3F: Haswell-EP
            plat = (geopm::PlatformImp *)(new geopm::HSXPlatformImp);
        }
        else {
            throw geopm::Exception("cpuid: " + std::to_string(cpu_id), GEOPM_ERROR_PLATFORM_UNSUPPORTED, __FILE__, __LINE__);
        }
        plat->initialize();
        sampler = new geopm::ProfileSampler(4096);
        sampler->initialize(rank_per_node);
        sample.resize(sampler->capacity());

        while (!sampler->do_shutdown()) {
            geopm_time(&start);
            sampler->sample(sample, sample_length);
            decide(plat, sample, sample_length);

            timeout = 0.0;
            while (timeout < LOOP_TIMEOUT) {
                geopm_time(&stop);
                timeout = geopm_time_diff(&start, &stop);
            }
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (!rank) {
        delete sampler;
        delete plat;
    }

    return 0;
}
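The cpu_id values matched above pack the x86 family and model into one integer: 0x62D and 0x63E are family 6, models 0x2D and 0x3E (Sandy Bridge-EP and Ivy Bridge-EP), and 0x63F is family 6, model 0x3F (Haswell-EP). A minimal sketch of deriving such an id from CPUID leaf 1 with GCC's <cpuid.h> (the function name is hypothetical; geopm's actual read_cpuid() may differ in detail):

#include <cpuid.h>

static int sketch_read_cpuid(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned int family, model;

    __get_cpuid(1, &eax, &ebx, &ecx, &edx);
    family = (eax >> 8) & 0xf;
    model = (eax >> 4) & 0xf;
    /* For families 6 and 15, extended model bits [19:16] prepend the model */
    if (family == 6 || family == 15)
        model |= ((eax >> 16) & 0xf) << 4;
    return (int)((family << 8) | model);   /* e.g. 0x63F = family 6, model 0x3F */
}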
Example #11
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 11 and 23 (the
	 * I and D "P" bits) of the cache type register.
	 */
	cache_type = read_cpuid(CPUID_CACHETYPE);
	/* CPUs with no cache type register return the main ID value here */
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#endif
	/* ... remainder of arch_get_unmapped_area() truncated in this excerpt ... */
}
Example #12
/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	u64 pfr = read_cpuid(ID_AA64PFR0_EL1);

	/* ID_AA64PFR0_EL1.FP, bits [19:16]: non-zero (0xf) means no FP */
	if (pfr & (0xf << 16)) {
		pr_notice("Floating-point is not implemented\n");
		return 0;
	}
	elf_hwcap |= HWCAP_FP;

	/* ID_AA64PFR0_EL1.AdvSIMD, bits [23:20]: non-zero means no SIMD */
	if (pfr & (0xf << 20))
		pr_notice("Advanced SIMD is not implemented\n");
	else
		elf_hwcap |= HWCAP_ASIMD;

	return 0;
}
Example #13
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();

	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

	cpuinfo_detect_icache_policy(info);
}
Example #14
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();
	info->reg_revidr = read_cpuid(REVIDR_EL1);

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

	/* Update the 32bit ID registers only if AArch32 is implemented */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
	}

	cpuinfo_detect_icache_policy(info);
}
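The id_aa64pfr0_32bit_el0() gate above reduces to one field check; a minimal sketch, assuming the ID_AA64PFR0_EL1 layout from the ARMv8 ARM (the helper name is hypothetical; mainline builds it on cpuid_feature_extract_field()):

static bool sketch_id_aa64pfr0_32bit_el0(u64 pfr0)
{
	/* ID_AA64PFR0_EL1.EL0, bits [3:0]: 0x2 means EL0 supports both
	 * AArch64 and AArch32. */
	return (pfr0 & 0xf) == 0x2;
}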
Example #15
static void __init setup_processor(void)
{
	u64 features;
	s64 block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = cpuid_feature_extract_field(features, 4);	/* AES field, bits [7:4] */
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fall through */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		elf_hwcap |= HWCAP_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		elf_hwcap |= HWCAP_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		elf_hwcap |= HWCAP_CRC32;

	block = cpuid_feature_extract_field(features, 20);	/* Atomics field, bits [23:20] */
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_ATOMICS;
			/* fall through */
		case 1:
			/* RESERVED */
		case 0:
			break;
		}
	}

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = cpuid_feature_extract_field(features, 4);	/* AES field, bits [7:4] */
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fall through */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
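All of the probing above relies on cpuid_feature_extract_field() treating each 4-bit ID block as a signed value, which is what makes the "> 0" tests reject the reserved negative encodings. A minimal sketch of that extraction, matching the signed-shift convention the comments describe (0xf sign-extends to -1):

static inline int sketch_feature_extract_field(u64 features, int field)
{
	/* Shift the 4-bit field at bit offset 'field' to the top of a
	 * signed 64-bit value, then arithmetic-shift it back down. */
	return (s64)(features << (64 - 4 - field)) >> 60;
}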