static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 reg_value;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc.S
	 */
	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, "aarch64");
	elf_hwcap = 0;

	/* Read the number of ASID bits */
	reg_value = read_cpuid(ID_AA64MMFR0_EL1) & 0xf0;
	if (reg_value == 0x00)
		max_asid_bits = 8;
	else if (reg_value == 0x20)
		max_asid_bits = 16;
	else
		BUG_ON(1);
	cpu_last_asid = 1 << max_asid_bits;
}
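The "read_cpuid_id() & 15" above keeps only the revision field of the MIDR. As a quick reference for the examples below, the register splits into five fields; the accessor macros in this sketch use hypothetical names, but the bit positions are architectural:

/* Illustrative MIDR field accessors (hypothetical names; architectural bit positions) */
#define MIDR_REVISION(midr)		((midr) & 0xf)			/* bits [3:0]   */
#define MIDR_PARTNUM(midr)		(((midr) >> 4) & 0xfff)		/* bits [15:4]  */
#define MIDR_ARCHITECTURE(midr)		(((midr) >> 16) & 0xf)		/* bits [19:16] */
#define MIDR_VARIANT(midr)		(((midr) >> 20) & 0xf)		/* bits [23:20] */
#define MIDR_IMPLEMENTOR(midr)		(((midr) >> 24) & 0xff)		/* bits [31:24] */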
Example #2
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000 ||
			    (read_cpuid_id() & 0xff00fc00) == 0x51000400)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}
	return 0;
}
Example #3
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
Example #4
/*
 * Enable the SCU
 */
void scu_enable(void __iomem *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/* Cortex-A9 only */
	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
		scu_ctrl = __raw_readl(scu_base + 0x30);
		if (!(scu_ctrl & 1))
			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
	}
#endif

	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1)
		return;

	scu_ctrl |= 1;
#ifdef CONFIG_ARCH_TEGRA_14x_SOC
	/* Enable SCU speculative line fill enable */
	scu_ctrl |= 8;
#endif
	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	flush_cache_all();
}
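The erratum check above masks the MIDR with 0xff0ffff0, which compares the implementer, architecture and part-number fields while ignoring variant and revision, so it matches every Cortex-A9 stepping. A minimal standalone sketch of the same test (the helper name is an assumption; the constants are those used above):

/* Sketch: does this MIDR identify a Cortex-A9, regardless of variant/revision? */
static inline int midr_is_cortex_a9(unsigned int midr)
{
	/* implementer 0x41 (ARM Ltd), architecture 0xf (CPUID scheme), part 0xc09 */
	return (midr & 0xff0ffff0) == 0x410fc090;
}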
Example #5
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();

	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

	cpuinfo_detect_icache_policy(info);
}
Example #6
/*
 * Enable the SCU
 */
void scu_enable(void __iomem *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/* Cortex-A9 only */
	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
		scu_ctrl = readl_relaxed(scu_base + 0x30);
		if (!(scu_ctrl & 1))
			writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
	}
#endif

#ifdef CONFIG_ARCH_HI6XXX
	return;
#else
	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1)
		return;

	scu_ctrl |= 1;
	writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	flush_cache_all();
#endif
}
Example #7
File: scu.c Project: KGG814/AOS
/* Enable the SCU */
void scu_enable(void *_scu_base)
{
    uint32_t scu_ctrl;
    volatile uint32_t *scu_base = (volatile uint32_t*)_scu_base;

#ifdef CONFIG_ARM_ERRATA_764369
    /* Cortex-A9 only */
    if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
        scu_ctrl = scu_base[0x30 / 4];
        if (!(scu_ctrl & 1)) {
            scu_base[0x30 / 4] = scu_ctrl | 0x1;
        }
    }
#endif

    scu_ctrl = scu_base[SCU_CTRL];
    /* already enabled? */
    if (scu_ctrl & 1) {
        return;
    }

    scu_ctrl |= 1;
    scu_base[SCU_CTRL] = scu_ctrl;

    /*
     * Ensure that the data accessed by CPU0 before the SCU was
     * initialised is visible to the other CPUs.
     */
    flush_dcache();
}
Example #8
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}
Example #9
/**
 * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_regs *reset_regs;
	const struct kvm_irq_level *cpu_vtimer_irq;

	switch (vcpu->arch.target) {
	case KVM_ARM_TARGET_CORTEX_A7:
	case KVM_ARM_TARGET_CORTEX_A15:
		reset_regs = &cortexa_regs_reset;
		vcpu->arch.midr = read_cpuid_id();
		cpu_vtimer_irq = &cortexa_vtimer_irq;
		break;
	default:
		return -ENODEV;
	}

	/* Reset core registers */
	memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));

	/* Reset CP15 registers */
	kvm_reset_coprocs(vcpu);

	/* Reset arch_timer context */
	kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);

	return 0;
}
Example #10
/* Initialize PMU properties of the current CPU  */
static void init_pmu_props_cpu(void* dummy)
{
	int this_cpu=smp_processor_id();
	pmcr_t reg;
	int i=0;
	u32 cpuid;
	u32 model;
	pmu_props_t* props=&pmu_props_cpu[this_cpu];


	init_pmcr(&reg);

	props->nr_gp_pmcs=get_bit_field32(&reg.m_n) & ARMV7_PMNC_N_MASK;
	props->nr_fixed_pmcs=1; //Cycle counter
	props->pmc_width=32;

	/* Mask */
	props->pmc_width_mask=0;
	for (i=0; i<props->pmc_width; i++)
		props->pmc_width_mask|=(1ULL<<i);

	/* Read PMU ID*/
	props->processor_model=reg.m_value;
	cpuid=read_cpuid_id();

	if ((cpuid & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		model=cpuid >> 4;
	} else {
Example #11
/* Initialize PMU properties of the current CPU  */
static void init_pmu_props_cpu(void* dummy)
{
	int this_cpu=smp_processor_id();
	pmcr_t reg;
	int i=0;
	u32 cpuid;
	u32 model;
	pmu_props_t* props=&pmu_props_cpu[this_cpu];


	init_pmcr(&reg);

	/* Read the nb of CNTx counters supported from PMNC */
	props->nr_gp_pmcs=get_bit_field32(&reg.m_n) & ARMV8_PMCR_N_MASK;
	props->nr_fixed_pmcs=1; //Cycle counter
	props->pmc_width=32;

	/* Mask */
	props->pmc_width_mask=0;
	for (i=0; i<props->pmc_width; i++)
		props->pmc_width_mask|=(1ULL<<i);

	/* Read PMU ID*/
	props->processor_model=reg.m_value;

	/* Hack extracted from c_show() in arch/arm/kernel/setup.c */
	cpuid=read_cpuid_id();

	if ((cpuid & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		model=cpuid >> 4;
	} else {
Example #12
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init omap4_smp_init_cpus(void)
{
	unsigned int i = 0, ncores = 1, cpu_id;

	/* Use ARM cpuid check here, as SoC detection will not work so early */
	cpu_id = read_cpuid_id() & CPU_MASK;
	if (cpu_id == CPU_CORTEX_A9) {
		/*
		 * Currently we can't call ioremap here because
		 * SoC detection won't work until after init_early.
		 */
		scu_base =  OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
		BUG_ON(!scu_base);
		ncores = scu_get_core_count(scu_base);
	} else if (cpu_id == CPU_CORTEX_A15) {
		ncores = OMAP5_CORE_COUNT;
	}

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}
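CPU_MASK, CPU_CORTEX_A9 and CPU_CORTEX_A15 are defined elsewhere in the OMAP code; plausible definitions consistent with the MIDR checks in the other examples would be the following (the values here are an assumption, not copied from the source):

/* Assumed definitions: compare implementer/architecture/part, ignore variant and revision */
#define CPU_MASK		0xff0ffff0
#define CPU_CORTEX_A9		0x410fc090
#define CPU_CORTEX_A15		0x410fc0f0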
Example #13
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}
Example #14
static int erratum_a15_798181(void)
{
	unsigned int midr = read_cpuid_id();

	/* Cortex-A15 r0p0..r3p2 affected */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return 0;
	return 1;
}
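Since the first test already guarantees that the model bits match a Cortex-A15, the comparison midr > 0x413fc0f2 only depends on the variant (rN) and revision (pN) fields. An illustrative expansion of the same range test, written out per field (not the kernel's code):

/* Sketch: the r0p0..r3p2 range check spelled out field by field */
static int midr_in_a15_798181_range(unsigned int midr)
{
	unsigned int variant  = (midr >> 20) & 0xf;	/* the "rN" number */
	unsigned int revision = midr & 0xf;		/* the "pN" number */

	if ((midr & 0xff0ffff0) != 0x410fc0f0)		/* not a Cortex-A15 */
		return 0;
	return variant < 3 || (variant == 3 && revision <= 2);
}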
Example #15
static void kbasep_cpuprops_uk_get_cpu_id_info(struct kbase_uk_cpuprops * const kbase_props)
{
	kbase_props->props.cpu_id.id           = read_cpuid_id();

	kbase_props->props.cpu_id.valid        = 1;
	kbase_props->props.cpu_id.rev          = KBASE_CPUPROPS_ID_GET_REV(kbase_props->props.cpu_id.id);
	kbase_props->props.cpu_id.part         = KBASE_CPUPROPS_ID_GET_PART_NR(kbase_props->props.cpu_id.id);
	kbase_props->props.cpu_id.arch         = KBASE_CPUPROPS_ID_GET_ARCH(kbase_props->props.cpu_id.id);
	kbase_props->props.cpu_id.variant      = KBASE_CPUPROPS_ID_GET_VARIANT(kbase_props->props.cpu_id.id);
	kbase_props->props.cpu_id.implementer  = KBASE_CPUPROPS_ID_GET_CODE(kbase_props->props.cpu_id.id);
}
Example #16
static bool __maybe_unused
is_affected_midr_range(struct arm64_cpu_capabilities *entry)
{
	u32 midr = read_cpuid_id();

	if ((midr & CPU_MODEL_MASK) != entry->midr_model)
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;

	return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
}
Example #17
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}
Example #18
/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu)
{
	int cpu = get_cpu();
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			ret = armv6pmu_init(pmu);
			break;
		case 0xB020:	/* ARM11mpcore */
			ret = armv6mpcore_pmu_init(pmu);
			break;
		case 0xC080:	/* Cortex-A8 */
			ret = armv7_a8_pmu_init(pmu);
			break;
		case 0xC090:	/* Cortex-A9 */
			ret = armv7_a9_pmu_init(pmu);
			break;
		case 0xC050:	/* Cortex-A5 */
			ret = armv7_a5_pmu_init(pmu);
			break;
		case 0xC0F0:	/* Cortex-A15 */
			ret = armv7_a15_pmu_init(pmu);
			break;
		case 0xC070:	/* Cortex-A7 */
			ret = armv7_a7_pmu_init(pmu);
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			ret = xscale1pmu_init(pmu);
			break;
		case 2:
			ret = xscale2pmu_init(pmu);
			break;
		}
	}

	put_cpu();
	return ret;
}
Example #19
static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc.S
	 */
	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, "aarch64");
	elf_hwcap = 0;
}
Example #20
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();
	info->reg_revidr = read_cpuid(REVIDR_EL1);

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);

	/* Update the 32bit ID registers only if AArch32 is implemented */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
		info->reg_zcr = read_zcr_features();

	cpuinfo_detect_icache_policy(info);
}
Example #21
/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu)
{
    int cpu = get_cpu();
    unsigned int cpuid = read_cpuid_id();
    int ret = -ENODEV;
    const struct pmu_probe_info *info;

    pr_info("probing PMU on CPU %d\n", cpu);

    for (info = pmu_probe_table; info->init != NULL; info++) {
        if ((cpuid & info->mask) != info->cpuid)
            continue;
        ret = info->init(pmu);
        break;
    }

    put_cpu();
    return ret;
}
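The loop above walks a table that pairs an expected MIDR value and mask with an init callback and stops at the first match. A minimal sketch of what such a table could look like, with the struct layout inferred from the loop and the entries reusing init functions from the earlier switch-based example (this is not the exact kernel definition):

/* Assumed shape of the probe table consumed by probe_current_pmu() above */
struct pmu_probe_info {
	unsigned int cpuid;			/* expected MIDR bits */
	unsigned int mask;			/* which MIDR bits to compare */
	int (*init)(struct arm_pmu *pmu);	/* PMU-specific initialiser */
};

/* Hypothetical table: Cortex-A9 and Cortex-A15 entries, terminated by init == NULL */
static const struct pmu_probe_info pmu_probe_table[] = {
	{ 0x410fc090, 0xff0ffff0, armv7_a9_pmu_init },
	{ 0x410fc0f0, 0xff0ffff0, armv7_a15_pmu_init },
	{ 0, 0, NULL },
};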
Example #22
static int __init ixp4xx_wdt_init(void)
{
    int ret;

    if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
        printk(KERN_ERR "IXP4XXX Watchdog: Rev. A0 IXP42x CPU detected"
               " - watchdog disabled\n");

        return -ENODEV;
    }
    spin_lock_init(&wdt_lock);
    boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
                  WDIOF_CARDRESET : 0;
    ret = misc_register(&ixp4xx_wdt_miscdev);
    if (ret == 0)
        printk(KERN_INFO "IXP4xx Watchdog Timer: heartbeat %d sec\n",
               heartbeat);
    return ret;
}
Example #23
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

	/* Update the 32bit ID registers only if AArch32 is implemented */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
	}

	cpuinfo_detect_icache_policy(info);

	check_local_cpu_errata();
}
Example #24
static void __cpuinit gt_setup(unsigned long base_paddr, unsigned bits)
{
	if ((read_cpuid_id() & 0xf00000) == 0)
		return;

	twd_clk = twd_get_clock();

	if (!IS_ERR_OR_NULL(twd_clk))
		twd_timer_rate = clk_get_rate(twd_clk);
	else
		twd_calibrate_rate();

	common_setup_called = true;
	
	gt_base = ioremap(base_paddr, SZ_256);
	BUG_ON(!gt_base);

	/* Start global timer */
	__raw_writel(1, gt_base + 0x8);

	tsc_info.type = IPIPE_TSC_TYPE_FREERUNNING;
	tsc_info.freq = twd_timer_rate;
	tsc_info.counter_vaddr = (unsigned long)gt_base;
	tsc_info.u.counter_paddr = base_paddr;
	
	switch(bits) {
	case 64:
		tsc_info.u.mask = 0xffffffffffffffffULL;
		break;
	case 32:
		tsc_info.u.mask = 0xffffffff;
		break;
	default:
		/* Only supported as a 32 bits or 64 bits */
		BUG();
	}

	__ipipe_tsc_register(&tsc_info);
}
Example #25
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 9 and 21 of the
	 * cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
Example #26
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
{
	struct cpuinfo_arm *cpu_info;
	struct cpumask *cpumask;
	unsigned long cpuid;
	int cpu;

	cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!cpumask)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		cpu_info = &per_cpu(cpu_data, cpu);
		cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id();

		/* read cpu id part number */
		if ((cpuid & 0xFFF0) == cpu_id)
			cpumask_set_cpu(cpu, cpumask);
	}

	drv->cpumask = cpumask;

	return 0;
}
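The cpu_id parameter is matched against the part-number field of each CPU's MIDR (cpuid & 0xFFF0). A hypothetical caller for a Cortex-A7 cluster, with the driver variable assumed and the part value taken from the PMU probing example above, might look like:

/* Hypothetical usage: build the cpumask for the little (Cortex-A7) cluster */
static struct cpuidle_driver bl_idle_little_driver;	/* assumed to be defined elsewhere */

static int __init bl_little_idle_init(void)
{
	/* 0xC070 is the Cortex-A7 part number as it sits in MIDR bits [15:4] */
	return bl_idle_driver_init(&bl_idle_little_driver, 0xC070);
}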
Example #27
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
			 * this configuration only have 16 x 64bit
			 * registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
			else
				elf_hwcap |= HWCAP_VFPD32;
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}
	return 0;
}
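The NEON check in the VFP examples tests three MVFR1 fields in a single comparison; broken out per field (mask and value taken from the expression above, field names per the Advanced SIMD feature register layout), it is roughly:

/* Sketch: the (fmrx(MVFR1) & 0x000fff00) == 0x00011100 test, field by field */
static int mvfr1_indicates_neon(unsigned int mvfr1)
{
	unsigned int simd_ldst = (mvfr1 >>  8) & 0xf;	/* A_SIMD load/store       */
	unsigned int simd_int  = (mvfr1 >> 12) & 0xf;	/* A_SIMD integer          */
	unsigned int simd_sp   = (mvfr1 >> 16) & 0xf;	/* A_SIMD single precision */

	return simd_ldst == 1 && simd_int == 1 && simd_sp == 1;
}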
Example #28
static void __init global_timer_of_register(struct device_node *np)
{
	struct clk *gt_clk;
	int err = 0;

	/*
	 * In r2p0 the comparators for each processor with the global timer
	 * fire when the timer value is greater than or equal to. In previous
	 * revisions the comparators fired when the timer value was equal to.
	 */
	if ((read_cpuid_id() & 0xf0000f) < 0x200000) {
		pr_warn("global-timer: non support for this cpu version.\n");
		return;
	}

	gt_ppi = irq_of_parse_and_map(np, 0);
	if (!gt_ppi) {
		pr_warn("global-timer: unable to parse irq\n");
		return;
	}

	gt_base = of_iomap(np, 0);
	if (!gt_base) {
		pr_warn("global-timer: invalid base address\n");
		return;
	}

	gt_clk = of_clk_get(np, 0);
	if (!IS_ERR(gt_clk)) {
		err = clk_prepare_enable(gt_clk);
		if (err)
			goto out_unmap;
	} else {
		pr_warn("global-timer: clk not found\n");
		err = -EINVAL;
		goto out_unmap;
	}

	gt_clk_rate = clk_get_rate(gt_clk);
	gt_evt = alloc_percpu(struct clock_event_device);
	if (!gt_evt) {
		pr_warn("global-timer: can't allocate memory\n");
		err = -ENOMEM;
		goto out_clk;
	}

	clk_rate_change_nb.notifier_call = arm_global_timer_clockevent_cb;
	clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(gt_clk,
				  &clk_rate_change_nb)) {
		pr_warn("Unable to register clock notifier.\n");
	}

	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
				 "gt", gt_evt);
	if (err) {
		pr_warn("global-timer: can't register interrupt %d (%d)\n",
			gt_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&gt_cpu_nb);
	if (err) {
		pr_warn("global-timer: unable to register cpu notifier.\n");
		goto out_irq;
	}

	/* Immediately configure the timer on the boot CPU */
	gt_clocksource_init();
	gt_clockevents_init(this_cpu_ptr(gt_evt));

	return;

out_irq:
	free_percpu_irq(gt_ppi, gt_evt);
out_free:
	free_percpu(gt_evt);
out_clk:
	clk_disable_unprepare(gt_clk);
out_unmap:
	iounmap(gt_base);
	WARN(err, "ARM Global timer register failed (%d)\n", err);
}
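The mask 0xf0000f keeps the variant (rN) and revision (pN) fields of the MIDR, and since the revision bits alone can never reach 0x200000, the comparison amounts to "variant < 2", i.e. anything older than r2p0. A one-line sketch of the same idea (the helper name is an assumption):

/* Sketch: equivalent of the (read_cpuid_id() & 0xf0000f) < 0x200000 test */
static int midr_is_pre_r2p0(unsigned int midr)
{
	return ((midr >> 20) & 0xf) < 2;	/* variant field, the "rN" number */
}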
Example #29
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
#ifdef CONFIG_PROC_FS
	static struct proc_dir_entry *procfs_entry;
#endif
	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
			 * this configuration only have 16 x 64bit
			 * registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
			else
				elf_hwcap |= HWCAP_VFPD32;
		}
#endif
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
#ifdef CONFIG_VFPv3
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
#endif
		}
	}

#ifdef CONFIG_PROC_FS
	procfs_entry = create_proc_entry("cpu/vfp_bounce", S_IRUGO, NULL);

	if (procfs_entry)
		procfs_entry->read_proc = proc_read_status;
	else
		pr_err("Failed to create procfs node for VFP bounce reporting\n");
#endif

	return 0;
}
Example #30
static int is_80219(void)
{
	return !!((read_cpuid_id() & 0xffffffe0) == 0x69052e20);
}