Example #1
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	/* Platform hook: boards with physical addresses wider than 32 bits can adjust phys_addr here. */
	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
#if !defined(CONFIG_BRCM_UPPER_768MB)
	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
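For context, a minimal sketch of how a driver consumes such a mapping. The device base address, register offset, and function name below are hypothetical, and ioremap() is assumed to be the usual arch wrapper that ends up in __ioremap():

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical device: a 4 KB register block at physical address 0x1f000000. */
#define DEV_PHYS_BASE	0x1f000000UL
#define DEV_REG_STATUS	0x04

static int dev_probe_sketch(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(DEV_PHYS_BASE, PAGE_SIZE);
	if (!regs)
		return -ENOMEM;

	status = readl(regs + DEV_REG_STATUS);	/* MMIO read through the mapping */
	iounmap(regs);

	return status ? 0 : -ENODEV;
}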
Example #2
File: ioremap.c Project: TitaniumBoy/lin
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map objects in the low 512mb of address space using KSEG1, otherwise
	 * map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
		return (void *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* +1: don't drop the last page when last_addr is page-aligned */

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}
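This is the older 2.4-era variant: note VMALLOC_VMADDR() and the vfree() error path. Callers normally reach __ioremap() through arch wrapper macros that pin down the cache flags; a minimal sketch, assuming MIPS-style _CACHE_* constants (the wrapper definitions are not shown in this file):

/* Sketch of the usual wrappers (names and flags assumed): */
#define ioremap(phys, size)		\
	__ioremap((phys), (size), _CACHE_UNCACHED)
#define ioremap_cachable(phys, size)	\
	__ioremap((phys), (size), _CACHE_CACHABLE_NONCOHERENT)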
Example #3
File: ioremap.c Project: 274914765/C
void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
    void __iomem * addr;
    struct vm_struct * area;
    unsigned long offset, last_addr;
    pgprot_t pgprot;

    /* Don't allow wraparound or zero size */
    last_addr = phys_addr + size - 1;
    if (!size || last_addr < phys_addr)
        return NULL;

    /*
     * Map objects in the low 512mb of address space using KSEG1, otherwise
     * map using page tables.
     */
    if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
        return (void __iomem *) KSEG1ADDR(phys_addr);

    /*
     * Don't allow anybody to remap normal RAM that we're using..
     */
    if (phys_addr < virt_to_phys(high_memory)) {
        char *t_addr, *t_end;
        struct page *page;

        t_addr = __va(phys_addr);
        t_end = t_addr + (size - 1);

        for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
            if(!PageReserved(page))
                return NULL;
    }

    /*
     * Protection bits for the new mapping: global, present, readable and
     * writable, plus whatever cache-attribute flags the caller passed in.
     */
    pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
              | _PAGE_WRITE | flags);

    /*
     * Mappings have to be page-aligned
     */
    offset = phys_addr & ~PAGE_MASK;
    phys_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - phys_addr;

    /*
     * Ok, go for it..
     */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    area->phys_addr = phys_addr;
    addr = (void __iomem *) area->addr;
    if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                   phys_addr, pgprot)) {
        vunmap((void __force *) addr);
        return NULL;
    }

    return (void __iomem *) (offset + (char __iomem *)addr);
}
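The matching teardown must not touch addresses handed out from KSEG1, since those were never entered into the page tables. A minimal sketch of such an __iounmap, assuming an IS_KSEG1() helper defined alongside IS_LOW512():

void __iounmap(const volatile void __iomem *addr)
{
	/* Direct-window (KSEG1) mappings have no page-table entry to undo. */
	if (IS_KSEG1(addr))
		return;

	/* Drop the vmalloc-space mapping created by get_vm_area(). */
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}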
Example #4
void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void *) CKSEG1ADDR(phys_addr);

#ifdef CONFIG_DISCONTIGMEM
#if defined(CONFIG_MIPS_BCM97438)
	if (IS_PA_UPPER_RAM(phys_addr) && flags == _CACHE_UNCACHED) {
		printk(KERN_ERR "Upper DDR at %08lx cannot be mapped uncached\n", phys_addr);
		return NULL;
	}
#elif defined(CONFIG_MIPS_BCM7440)
	if (IS_PA_UPPER_RAM(phys_addr) && flags == _CACHE_UNCACHED) {
		printk(KERN_ERR "Upper/High DDR at %08lx cannot be mapped uncached\n", phys_addr);
		return NULL;
	}
#endif
#endif

#ifndef CONFIG_DISCONTIGMEM
  #ifdef CONFIG_MIPS_BRCM97XXX
	/*
	 * Fixed register/PCI windows on these boards are returned
	 * identity-mapped.  Exactly one of the following conditions
	 * survives preprocessing; the shared return statement below
	 * is its body.
	 */
  #if defined(CONFIG_MIPS_BCM7038A0)
	if ((phys_addr >= 0xd0000000) && (phys_addr <= 0xe060000b))
  #elif defined(CONFIG_MIPS_BCM7038B0) || defined(CONFIG_MIPS_BCM7038C0) \
	|| defined(CONFIG_MIPS_BCM7400)
	if ((phys_addr >= 0xd0000000) && (phys_addr <= 0xf060000b))
  #elif defined(CONFIG_MIPS_BCM3560) \
	|| defined(CONFIG_MIPS_BCM7401) || defined(CONFIG_MIPS_BCM7402) \
	|| defined(CONFIG_MIPS_BCM7118) || defined(CONFIG_MIPS_BCM7403) \
	|| defined(CONFIG_MIPS_BCM7452)
	if ((((unsigned long)phys_addr >= 0xd0000000) &&
	     ((unsigned long)phys_addr <= 0xf060000b)) ||
	    ((unsigned long)phys_addr >= 0xff400000))
  #else
	if (phys_addr >= 0xffe00000)
  #endif
		return (void *) (phys_addr);
  #endif
#else
  /* 97438 discontiguous memory model */
  #if defined(CONFIG_MIPS_BCM97438)
	if (((phys_addr >= 0xd0000000) && (phys_addr < 0xe0000000)) ||
	    ((phys_addr >= 0xf0000000) && (phys_addr <= 0xf060000b)))
		return (void *) (phys_addr);

	/* else the upper RAM area is handled just like lower RAM, below */
  #elif defined(CONFIG_MIPS_BCM7440)
	if ((phys_addr >= 0xd0000000) && (phys_addr < 0xd8000000))
		/* 128 MB of PCI-MEM */
		return (void *) (phys_addr);
	if ((phys_addr >= 0xf0000000) && (phys_addr < 0xf2000000))
		/* 32 MB of PCI-IO */
		return (void *) (0xf8000000 + (phys_addr - 0xf0000000));
  #else
	#error "Unsupported discontigmem platform"
  #endif

#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}
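Note that the fixed register and PCI windows above are returned identity-mapped, with no page-table entry behind them; presumably the matching __iounmap has to treat those ranges like the KSEG1 case and return without calling vunmap().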
Example #5
File: ioremap.c Project: cilynx/dd-wrt
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * In interrupt/bottom-half context we cannot sleep, so try the fixed
	 * temporary mapping, which can be set up atomically.  This is limited
	 * to a single page.
	 */
	if (in_interrupt() && (size <= PAGE_SIZE))
		return (void __iomem *) (kmap_atomic_pfn_prot(phys_addr >> PAGE_SHIFT,
			KM_PCIE, PAGE_KERNEL_UNCACHED) + offset);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
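All five variants gate their fast path on the same 512 MB window test. For reference, a sketch of the two helpers as they typically appear in MIPS ioremap.c and the address-space headers (the excerpts above do not show the definitions):

/* True iff the address lies in the low 512 MB reachable through KSEG1. */
#define IS_LOW512(addr)	(!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

/* Map a low physical address into the uncached KSEG1 segment at 0xa0000000. */
#define CKSEG1ADDR(a)	(((unsigned long)(a) & 0x1fffffff) | 0xa0000000UL)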