Example No. 1
0
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
                                        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
    const struct mem_type *type;
    int err;
    unsigned long addr;
    struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
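    /* High mappings must be supersection aligned. */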
    if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
        return NULL;
#endif

    type = get_mem_type(mtype);
    if (!type)
        return NULL;

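    /* Page align the mapping size, taking account of any offset. */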
    size = PAGE_ALIGN(offset + size);

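    /* Try to reuse one of the static mappings whenever possible. */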
    read_lock(&vmlist_lock);
    for (area = vmlist; area; area = area->next) {
        if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
            break;
        if (!(area->flags & VM_ARM_STATIC_MAPPING))
            continue;
        if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
            continue;
        if (__phys_to_pfn(area->phys_addr) > pfn ||
                __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
            continue;

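        /* We can drop the lock here as we know *area is static. */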
        read_unlock(&vmlist_lock);
        addr = (unsigned long)area->addr;
        addr += __pfn_to_phys(pfn) - area->phys_addr;
        return (void __iomem *) (offset + addr);
    }
    read_unlock(&vmlist_lock);

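    /* Don't allow RAM to be mapped - this causes problems with ARMv6+. */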
    if (WARN_ON(pfn_valid(pfn)))
        return NULL;

    area = get_vm_area_caller(size, VM_IOREMAP, caller);
    if (!area)
        return NULL;
    addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
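    /*
     * On UP, non-LPAE kernels, try a 16 MiB supersection or 1 MiB section
     * mapping when the physical base, size, and virtual address are all
     * suitably aligned; otherwise fall through to page mappings.
     */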
    if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_supersections(addr, pfn, size, type);
    } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_sections(addr, pfn, size, type);
    } else
#endif
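        /* Fall back to mapping individual pages. */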
        err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                 __pgprot(type->prot_pte));

    if (err) {
        vunmap((void *)addr);
        return NULL;
    }

    flush_cache_vmap(addr, addr + size);
    return (void __iomem *) (offset + addr);
}
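For context, drivers never call __arm_ioremap_pfn_caller() directly: on ARM, ioremap() expands to __arm_ioremap(), which splits the physical address into a pfn plus page offset and ends up in the function above with MT_DEVICE and the caller's return address. Below is a minimal, hypothetical driver sketch of that call path; the device base address, register offset, and module names are invented purely for illustration.

/*
 * Hypothetical driver snippet: every name and address here is
 * illustrative.  ioremap() funnels into the function above.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>

#define DEMO_PHYS_BASE	0x10000000UL	/* assumed device register base */
#define DEMO_REG_SIZE	0x1000UL	/* one page of registers */
#define DEMO_CTRL	0x00		/* assumed control register offset */

static void __iomem *demo_regs;

static int __init demo_init(void)
{
	demo_regs = ioremap(DEMO_PHYS_BASE, DEMO_REG_SIZE);
	if (!demo_regs)
		return -ENOMEM;
	writel(0x1, demo_regs + DEMO_CTRL);	/* poke a control register */
	return 0;
}

static void __exit demo_exit(void)
{
	iounmap(demo_regs);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");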
Example No. 2
0
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
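The (super)section fast path in both examples hinges on one joint alignment test: the physical base, the virtual address, and the size are OR'd together and checked against a single mask, so one misaligned operand is enough to force the ioremap_page_range() fallback. A standalone sketch of that test follows; the mask values are assumptions matching the classic ARM short-descriptor layout (2 MiB PMDs, 16 MiB supersections), not taken from the kernel headers.

/*
 * Standalone sketch of the alignment tests used above.  The sizes are
 * assumed; the kernel derives PMD_MASK and SUPERSECTION_MASK from its
 * page-table headers.
 */
#include <stdio.h>

#define PMD_SIZE		(1UL << 21)	/* 2 MiB */
#define PMD_MASK		(~(PMD_SIZE - 1))
#define SUPERSECTION_SIZE	(1UL << 24)	/* 16 MiB */
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE - 1))

/* Mirrors !((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK) */
static int section_mappable(unsigned long phys, unsigned long virt,
			    unsigned long size)
{
	return !((phys | virt | size) & ~PMD_MASK);
}

static int supersection_mappable(unsigned long phys, unsigned long virt,
				 unsigned long size)
{
	return !((phys | virt | size) & ~SUPERSECTION_MASK);
}

int main(void)
{
	/* aligned on all three counts: section path taken -> 1 */
	printf("%d\n", section_mappable(0x10000000UL, 0xf0000000UL, 0x200000UL));
	/* 4 KiB-misaligned physical base: page fallback -> 0 */
	printf("%d\n", section_mappable(0x10001000UL, 0xf0000000UL, 0x200000UL));
	/* 16 MiB-aligned everywhere: supersection candidate -> 1 */
	printf("%d\n", supersection_mappable(0x11000000UL, 0xf1000000UL, 0x1000000UL));
	return 0;
}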