Example #1
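The simplest caller: a thin ioremap() wrapper that runs the physical address through fixup_bigphys_addr() before handing the fixed-up 64-bit address to ioremap64().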
void *
ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);

	return ioremap64(addr64, size);
}
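For context, a minimal usage sketch, assuming this ioremap() variant; the demo_* names and the 36-bit bus address are hypothetical, invented for illustration. A driver maps a register block that may sit above the 32-bit physical boundary and relies on the fixup_bigphys_addr() call inside ioremap() to translate the address first.

#include <linux/io.h>
#include <linux/errno.h>

#define DEMO_REG_BASE	0x100000000ULL	/* hypothetical 36-bit bus address */
#define DEMO_REG_SIZE	0x1000		/* one page of registers */

static void *demo_regs;

static int demo_map_registers(void)
{
	/* ioremap() fixes up the big physical address before mapping */
	demo_regs = ioremap(DEMO_REG_BASE, DEMO_REG_SIZE);
	if (!demo_regs)
		return -ENOMEM;
	return 0;
}

static void demo_unmap_registers(void)
{
	iounmap(demo_regs);
}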
Example #2
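A MIPS __ioremap() implementation (Broadcom) that fixes up the physical address first, short-circuits low uncached mappings through KSEG1 unless CONFIG_BRCM_UPPER_768MB is set, and otherwise maps through page tables.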
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
#if !defined(CONFIG_BRCM_UPPER_768MB)
	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
Example #3
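pci_resource_to_user() translates a PCI resource into user-visible addresses; note that only the start address is passed through fixup_bigphys_addr(), while the end is computed from the raw resource start.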
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc, resource_size_t *start,
			  resource_size_t *end)
{
	phys_addr_t size = resource_size(rsrc);

	*start = fixup_bigphys_addr(rsrc->start, size);
	*end = rsrc->start + size - 1;
}
Example #4
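A heavily #ifdef'ed Broadcom variant of __ioremap() that identity-maps various BCM7xxx register and PCI windows, rejects uncached mappings of upper DDR on discontigmem platforms, and otherwise falls back to the generic page-table path.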
void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void *) CKSEG1ADDR(phys_addr);

#ifdef CONFIG_DISCONTIGMEM
#if defined(CONFIG_MIPS_BCM97438)
	if (IS_PA_UPPER_RAM(phys_addr) && flags == _CACHE_UNCACHED) {
		printk(KERN_ERR "Upper DDR at %08lx cannot be mapped uncached\n",
		       (unsigned long)phys_addr);
		return NULL;
	}
#elif defined(CONFIG_MIPS_BCM7440)
	if (IS_PA_UPPER_RAM(phys_addr) && (flags == _CACHE_UNCACHED)) {
		printk(KERN_ERR "Upper/high DDR at %08lx cannot be mapped uncached\n",
		       (unsigned long)phys_addr);
		return NULL;
	}
#endif
#endif

#ifndef CONFIG_DISCONTIGMEM
#ifdef CONFIG_MIPS_BRCM97XXX
#if defined(CONFIG_MIPS_BCM7038A0)
	if ((phys_addr >= 0xd0000000) && (phys_addr <= 0xe060000b))
#elif defined(CONFIG_MIPS_BCM7038B0) || defined(CONFIG_MIPS_BCM7038C0) \
	|| defined(CONFIG_MIPS_BCM7400)
	if ((phys_addr >= 0xd0000000) && (phys_addr <= 0xf060000b))
#elif defined(CONFIG_MIPS_BCM3560) \
	|| defined(CONFIG_MIPS_BCM7401) || defined(CONFIG_MIPS_BCM7402) \
	|| defined(CONFIG_MIPS_BCM7118) || defined(CONFIG_MIPS_BCM7403) \
	|| defined(CONFIG_MIPS_BCM7452)
	if ((((unsigned long)phys_addr >= 0xd0000000) &&
	     ((unsigned long)phys_addr <= 0xf060000b)) ||
	    ((unsigned long)phys_addr >= 0xff400000))
#else
	if (phys_addr >= 0xffe00000)
#endif
		return (void *)phys_addr;
#endif
#else
	/* 97438 discontiguous memory model */
#if defined(CONFIG_MIPS_BCM97438)
	if (((phys_addr >= 0xd0000000) && (phys_addr < 0xe0000000)) ||
	    ((phys_addr >= 0xf0000000) && (phys_addr <= 0xf060000b)))
		return (void *)phys_addr;

	/* else the upper RAM area is handled just like lower RAM, below */
#elif defined(CONFIG_MIPS_BCM7440)
	if ((phys_addr >= 0xd0000000) && (phys_addr < 0xd8000000))
		/* 128 MB of PCI-MEM */
		return (void *)phys_addr;
	if ((phys_addr >= 0xf0000000) && (phys_addr < 0xf2000000))
		/* 32 MB of PCI-IO */
		return (void *)(0xf8000000 + (phys_addr - 0xf0000000));
#else
#error "Unsupported discontigmem platform"
#endif

#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}
Example #5
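A variant of __ioremap() that, when called from interrupt or bottom-half context, serves single-page requests from an atomic fixed-map slot via kmap_atomic_pfn_prot() instead of sleeping in get_vm_area().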
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * If we are in interrupt/bottom-half context, try to use a fixed
	 * temporary mapping, which we can obtain atomically. However, we
	 * are limited to a single page.
	 */
	if (in_interrupt() && (size <= PAGE_SIZE))
		return (void __iomem *) (kmap_atomic_pfn_prot(phys_addr >> PAGE_SHIFT,
			KM_PCIE, PAGE_KERNEL_UNCACHED) + offset);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}