Code example #1
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
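
For orientation: in practice this function is reached through thin wrappers that split a physical address into a page frame number and an in-page offset before delegating. A minimal sketch of such a wrapper follows; the name example_ioremap_caller is hypothetical, while __phys_to_pfn and PAGE_MASK are the usual kernel helpers.

/*
 * Hypothetical wrapper (sketch only): split phys_addr into pfn + offset,
 * then delegate to __arm_ioremap_pfn_caller() as defined above.
 */
void __iomem *example_ioremap_caller(unsigned long phys_addr, size_t size,
				     unsigned int mtype, void *caller)
{
	unsigned long pfn = __phys_to_pfn(phys_addr);
	unsigned long offset = phys_addr & ~PAGE_MASK;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}
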
Code example #2
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
#ifdef CONFIG_PROC_FS
	static struct proc_dir_entry *procfs_entry;
#endif
	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000 ||
			    (read_cpuid_id() & 0xff00fc00) == 0x51000400)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}

#ifdef CONFIG_PROC_FS
	procfs_entry = proc_create("cpu/vfp_bounce", S_IRUGO, NULL,
			&vfp_bounce_fops);
	if (!procfs_entry)
		pr_err("Failed to create procfs node for VFP bounce reporting\n");
#endif

	return 0;
}
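
The FPSID decode above repeats a single idiom: mask out a bit field, then shift it down to bit zero. A tiny standalone illustration of the same pattern (the FIELD_* names and the sample value are made up for this sketch, not kernel definitions):

#include <stdio.h>

/* Hypothetical mask/bit pair mirroring the FPSID_*_MASK / FPSID_*_BIT style. */
#define FIELD_MASK	0x00ff0000
#define FIELD_BIT	16

int main(void)
{
	unsigned int reg = 0x410120b4;	/* arbitrary raw register value */

	/* Same decode as (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT */
	unsigned int field = (reg & FIELD_MASK) >> FIELD_BIT;

	printf("field = %#x\n", field);	/* prints field = 0x1 */
	return 0;
}
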
Code example #3
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		smp_call_function(vfp_enable, NULL, 1);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
#ifdef CONFIG_NEON
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations.
		 */
		if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
			elf_hwcap |= HWCAP_NEON;
#endif
	}
	return 0;
}
Code example #4
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
}
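
A sketch of how such a predicate might be consumed at boot, e.g. to report which alignment model is active. The caller below is illustrative only; example_alignment_init is a hypothetical name, not the kernel's actual alignment setup code.

/*
 * Hypothetical caller: on the ARMv6 unaligned-access model the hardware
 * handles most unaligned loads/stores itself; otherwise the kernel must
 * fix them up (or fault) in software.
 */
static int __init example_alignment_init(void)
{
	if (cpu_is_v6_unaligned())
		pr_info("alignment: ARMv6 unaligned access model in use\n");
	else
		pr_info("alignment: software fixup required for unaligned accesses\n");
	return 0;
}
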
Code example #5
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
#ifdef CONFIG_PROC_FS
	static struct proc_dir_entry *procfs_entry;
#endif
	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000 ||
			    (read_cpuid_id() & 0xff00fc00) == 0x51000400)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}

#ifdef CONFIG_PROC_FS
	procfs_entry = proc_create("cpu/vfp_bounce", S_IRUGO, NULL,
			&vfp_bounce_fops);
	if (!procfs_entry)
		pr_err("Failed to create procfs node for VFP bounce reporting\n");
#endif

	return 0;
}
Code example #6
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
		       KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
		       KERN_WARNING "will fail in the next kernel release.  Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Code example #7
static int ml_checkImage(struct file* file, macho_header* head) 
{
	/*
		Sanity checks.
	*/
	int retval = -ENOEXEC;
	int file_size = 0;
	size_t macho_header_sz = sizeof(macho_header);
	
	if (head->magic != MH_MAGIC) {
		printk(KERN_WARNING "ml_checkImage: binary is not a macho binary (magic: 0x%p) \n", (void*)head->magic);
		retval = -ENOEXEC;
		goto out_ret;
	}
	
	/*
		Validate architecture.
	*/
	if (head->cputype != CPU_TYPE_ARM) {
		printk(KERN_WARNING "ml_checkImage: wrong architecture in the executable\n");
		retval = -EINVAL;
		goto out_ret;
	}
	
	/*
		Run ARM-specific validation checks
	*/
	if (head->cputype == CPU_TYPE_ARM) {
		if (head->cpusubtype == CPU_SUBTYPE_ARM_V7)
		{
			if (cpu_architecture() != CPU_ARCH_ARMv7) {
				printk(KERN_WARNING "ml_checkImage: armv7 executables are not supported by the current platform\n");
				retval = -EINVAL;
				goto out_ret;
			}
		}
		else if (head->cpusubtype == CPU_SUBTYPE_ARM_V6)
		{
			if (cpu_architecture() != CPU_ARCH_ARMv6) {
				printk(KERN_WARNING "ml_checkImage: armv6 executables are not supported by the current platform\n");
				retval = -EINVAL;
				goto out_ret;
			}
		}
		else {
			printk(KERN_WARNING "ml_checkImage: unrecognized arm version in the executable (%d)\n", head->cpusubtype);
			retval = -EINVAL;
			goto out_ret;
		}
	}
	
	
	/*
	 	Make sure the file size can be retrieved in order 
	  	to perform sanity checks on the file.
	 */
	file_size = ml_getFileSize(file);
	if (file_size < 0) {
		printk(KERN_WARNING "ml_checkImage: can't retrieve binary size \n");
		retval = -EINVAL;
		goto out_ret;
	}
	
	/*
		Main portion of the sanity checks for the macho file.
	*/
	
	retval = -EINVAL;
	/* can we map it? */
	if (!file->f_op||!file->f_op->mmap) {
		printk(KERN_WARNING "ml_checkImage: binary file can't be mapped in \n");
		goto out_ret;
	}
	/* sane lc size? */
	if ((off_t)(macho_header_sz + head->sizeofcmds) > file_size) {
		printk(KERN_WARNING "ml_checkImage: malformed load commands size \n");
		goto out_ret;
	}
	if (head->filetype != MH_EXECUTE) {
		printk(KERN_WARNING "IGN:ml_checkImage: macho file is not executable \n");
		//goto out_ret;
	}
	
	/* Print some info about the macho file */
	if (_verboseLog)
		printk(KERN_WARNING "ml_checkImage: valid macho file: \n\tmagic: 0x%p \n\tsize: %d\n",
				(void*)head->magic,
				file_size);
	
	retval = 0;
	
out_ret:	
	return retval;
}
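
For reference, the checks above only touch a handful of header fields. A minimal sketch of the 32-bit Mach-O header they assume (the field layout and the MH_MAGIC value follow the public Mach-O format; the typedef name is taken from the code above):

/* Minimal 32-bit Mach-O header covering the fields ml_checkImage uses. */
typedef struct macho_header {
	unsigned int	magic;		/* MH_MAGIC (0xfeedface) for 32-bit */
	int		cputype;	/* e.g. CPU_TYPE_ARM */
	int		cpusubtype;	/* e.g. CPU_SUBTYPE_ARM_V6 / _V7 */
	unsigned int	filetype;	/* MH_EXECUTE for executables */
	unsigned int	ncmds;		/* number of load commands */
	unsigned int	sizeofcmds;	/* total size of the load commands */
	unsigned int	flags;
} macho_header;
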
Code example #8
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
                                        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
    const struct mem_type *type;
    int err;
    unsigned long addr;
    struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
    if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
        return NULL;
#endif

    type = get_mem_type(mtype);
    if (!type)
        return NULL;

    size = PAGE_ALIGN(offset + size);

    read_lock(&vmlist_lock);
    for (area = vmlist; area; area = area->next) {
        if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
            break;
        if (!(area->flags & VM_ARM_STATIC_MAPPING))
            continue;
        if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
            continue;
        if (__phys_to_pfn(area->phys_addr) > pfn ||
                __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
            continue;

        read_unlock(&vmlist_lock);
        addr = (unsigned long)area->addr;
        addr += __pfn_to_phys(pfn) - area->phys_addr;
        return (void __iomem *) (offset + addr);
    }
    read_unlock(&vmlist_lock);

    if (WARN_ON(pfn_valid(pfn)))
        return NULL;

    area = get_vm_area_caller(size, VM_IOREMAP, caller);
    if (!area)
        return NULL;
    addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
    if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_supersections(addr, pfn, size, type);
    } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_sections(addr, pfn, size, type);
    } else
#endif
        err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                 __pgprot(type->prot_pte));

    if (err) {
        vunmap((void *)addr);
        return NULL;
    }

    flush_cache_vmap(addr, addr + size);
    return (void __iomem *) (offset + addr);
}
Code example #9
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6 && (get_cr() & CR_U);
}
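
Unlike the variant earlier in this section, which tests the cached cr_alignment copy, this version reads the live control register through get_cr(). On ARM that accessor is conventionally a single CP15 read; a sketch of its usual shape is shown below for orientation (mirroring the kernel's cp15 accessors, not code from this project):

/* Conventional get_cr(): read CP15 c1, the system control register. */
static inline unsigned long get_cr(void)
{
	unsigned long val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}
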
Code example #10
File: vfpmodule.c  Project: eugene373/ApexqJB
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
	struct cpumask cpus_curr, cpus;

	sched_getaffinity(current->pid, &cpus_curr);
	cpumask_clear(&cpus);
	cpumask_set_cpu(smp_processor_id(), &cpus);

	if (sched_setaffinity(current->pid, &cpus))
		pr_err("%s: set CPU affinity failed.\n", __func__);
	else
		pr_err("%s: affinity set to CPU %d\n",
			__func__, smp_processor_id());

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		smp_call_function(vfp_enable, NULL, 1);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
			 * this configuration only have 16 x 64bit
			 * registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
			else
				elf_hwcap |= HWCAP_VFPD32;
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif

			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000 ||
			    (read_cpuid_id() & 0xff00fc00) == 0x51000400)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}

	if (sched_setaffinity(current->pid, &cpus_curr))
		pr_err("%s: Restore CPU affinity failed !!\n", __func__);
	else
		pr_err("%s: affinity restored to %x\n",
			__func__, *((int *)(cpus_curr.bits)));
	return 0;
}
Code example #11
File: vfpmodule.c  Project: rrowicki/Chrono_Kernel-1
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
			 * this configuration only have 16 x 64bit
			 * registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
			else
				elf_hwcap |= HWCAP_VFPD32;
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
#ifdef CONFIG_VFPv3
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
#endif
		}
	}

	return 0;
}