/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	DEBUG_IOREMAP(("Get vm_area returns %p addr %p\n",
		      area, area->addr));
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
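A purely illustrative call site for the function above (the address, size, and helper name are invented): it shows what the offset handling buys the caller, namely that an unaligned physical address goes in and the returned pointer addresses exactly that byte even though the underlying mapping is page-aligned.

/*
 * Hypothetical caller: map a 0x40-byte register block that starts 4 bytes
 * into a page. __ioremap() rounds the mapping to whole pages internally
 * and adds the 4-byte offset back before returning.
 */
static void *example_map_regs(void)
{
	/* offset = 0x10000004 & ~PAGE_MASK = 4; mapped size = PAGE_SIZE */
	void *regs = __ioremap(0x10000004UL, 0x40, 0);

	/* regs points at physical 0x10000004; release later with iounmap(regs) */
	return regs;
}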
Example #2
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (void __iomem *)area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, prot)) {
		vfree((void __force *)addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
Example #3
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
	  unsigned long align)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
}
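A hedged call-site sketch for this flags-taking variant: L_PTE_BUFFERABLE is one of the old ARM L_PTE_* page-table flags the comment above refers to, the physical address and size are invented, and the trailing 'align' argument is passed as 0 since the body shown does not use it.

/*
 * Hypothetical caller: map 4KB of a device write-buffered but uncached.
 * L_PTE_BUFFERABLE comes from the old ARM pgtable headers; address and
 * size are made up for illustration.
 */
static void __iomem *example_map_device(void)
{
	return __ioremap(0xd8000000UL, 0x1000, L_PTE_BUFFERABLE, 0);
}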
static int fmem_probe(struct platform_device *pdev)
{
	struct fmem_platform_data *pdata = pdev->dev.platform_data;

	fmem_data.phys = pdata->phys + pdata->reserved_size_low;
	fmem_data.size = pdata->size - pdata->reserved_size_low -
					pdata->reserved_size_high;
	fmem_data.reserved_size_low = pdata->reserved_size_low;
	fmem_data.reserved_size_high = pdata->reserved_size_high;

	if (!fmem_data.size)
		return -ENODEV;

	fmem_data.area = get_vm_area(fmem_data.size, VM_IOREMAP);
	if (!fmem_data.area)
		return -ENOMEM;

	if (!fmem_map_virtual_area(MT_DEVICE_CACHED)) {
		remove_vm_area(fmem_data.area->addr);
		return -ENOMEM;
	}
	pr_info("fmem phys %lx virt %p size %lx\n",
		fmem_data.phys, fmem_data.virt, fmem_data.size);

	spin_lock_init(&fmem_state_lock);

	return 0;
}
Example #5
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr, addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	struct vm_struct *area;

	pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
					|(__HEXAGON_C_DEV << 6));

	last_addr = phys_addr + size - 1;

	/* Don't allow wraparound or zero size */
	if (!size || (last_addr < phys_addr))
		return NULL;

	
	/* Round the size up to whole pages, including the in-page offset */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *) (offset + addr);
}
Example #6
static int fmem_probe(struct platform_device *pdev)
{
	struct fmem_platform_data *pdata = pdev->dev.platform_data;

#ifdef CONFIG_MEMORY_HOTPLUG
	fmem_section_start = pdata->phys >> PA_SECTION_SHIFT;
	fmem_section_end = (pdata->phys - 1 + pdata->size) >> PA_SECTION_SHIFT;
#endif
	fmem_data.phys = pdata->phys + pdata->reserved_size_low;
	fmem_data.size = pdata->size - pdata->reserved_size_low -
					pdata->reserved_size_high;
	fmem_data.reserved_size_low = pdata->reserved_size_low;
	fmem_data.reserved_size_high = pdata->reserved_size_high;

	if (!fmem_data.size)
		return -ENODEV;

	fmem_data.area = get_vm_area(fmem_data.size, VM_IOREMAP);
	if (!fmem_data.area)
		return -ENOMEM;

	if (!fmem_map_virtual_area(MT_DEVICE_CACHED)) {
		remove_vm_area(fmem_data.area->addr);
		return -ENOMEM;
	}
	pr_info("fmem phys %lx virt %p size %lx\n",
		fmem_data.phys, fmem_data.virt, fmem_data.size);

	spin_lock_init(&fmem_state_lock);

	return 0;
}
Example #7
void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (void __iomem *)area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, prot)) {
		vfree((void __force *)addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
Example #8
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
#if !defined(CONFIG_BRCM_UPPER_768MB)
	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
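The KSEG1 shortcut above avoids page tables entirely for uncached mappings that fit in the low 512MB; the sketch below spells out the arithmetic it relies on. The macro definitions are assumptions based on the usual 32-bit MIPS memory layout, not copied from this file.

/*
 * Assumed definitions, following the conventional 32-bit MIPS layout:
 * KSEG1 is a fixed, unmapped, uncached window at 0xa0000000 that mirrors
 * the first 512MB of physical memory, so no TLB entries are needed.
 */
#define IS_LOW512(addr)		(!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
#define CKSEG1ADDR(pa)		((unsigned long)(pa) | 0xa0000000UL)

/* Example: physical 0x1fc00000 (boot flash) maps to virtual 0xbfc00000 */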
Example #9
/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space.  This is done so we
 * get unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping.  This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct vm_struct *area;
	unsigned long page, va, pa;
	void *ret;
	int order, err, i;

	if (in_interrupt())
		BUG();

	/* only allocate page size areas */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/* allocate some common virtual space to map the new pages */
	area = get_vm_area(size, VM_ALLOC);
	if (area == 0) {
		free_pages(page, order);
		return NULL;
	}
	va = VMALLOC_VMADDR(area->addr);
	ret = (void *) va;

	/* this gives us the real physical address of the first page */
	*dma_handle = pa = virt_to_bus((void *) page);

	/* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free
	 * all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);

		for (i = 1; i < (1 << order); i++)
			set_page_count(rpage + i, 1);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

	if (err) {
		vfree((void *) va);
		return NULL;
	}

	/* we need to ensure that there are no cachelines in use, or worse dirty in this area
	 * - can't do until after virtual address mappings are created
	 */
	frv_cache_invalidate(va, va + size);

	return ret;
}
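A usage sketch for consistent_alloc() as a hypothetical driver might call it (the function and variable names below are invented): the returned pointer is the CPU's uncached alias of the buffer, while dma_handle is the bus address to program into the device.

/*
 * Hypothetical caller: allocate one page for a DMA descriptor ring.
 */
static void *ring;
static dma_addr_t ring_dma;

static int example_alloc_ring(void)
{
	ring = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &ring_dma);
	if (!ring)
		return -ENOMEM;

	/* program ring_dma into the device's base-address register here */
	return 0;
}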
Example #10
static int hs_init_socket(hs_socket_t *sp, int irq, unsigned long mem_base,
    	    unsigned int ctrl_base)
{
    	unsigned short v;
    	int i, err;

    	memset(sp, 0, sizeof(*sp));
	sp->irq = irq;
	sp->mem_base = mem_base;
	sp->mem_length = 4*HD64465_PCC_WINDOW;	/* 16MB */
	sp->ctrl_base = ctrl_base;
	
	for (i=0 ; i<MAX_IO_WIN ; i++)
	    sp->io_maps[i].map = i;
	for (i=0 ; i<MAX_WIN ; i++)
	    sp->mem_maps[i].map = i;
	
	if ((sp->io_vma = get_vm_area(HS_IO_MAP_SIZE, VM_IOREMAP)) == 0)
	    return -ENOMEM;

	hd64465_register_irq_demux(sp->irq, hs_irq_demux, sp);
	
    	if ((err = request_irq(sp->irq, hs_interrupt, SA_INTERRUPT, MODNAME, sp)) < 0)
	    return err;
    	if (request_mem_region(sp->mem_base, sp->mem_length, MODNAME) == 0) {
    	    sp->mem_base = 0;
	    return -ENOMEM;
	}


	/* According to section 3.2 of the PCMCIA standard, low-voltage
	 * capable cards must implement cold insertion, i.e. Vpp and
	 * Vcc set to 0 before card is inserted.
	 */
	/*hs_set_voltages(sp, 0, 0);*/
	
	/* hi-Z the outputs to the card and set 16MB map mode */
	v = hs_in(sp, GCR);
	v &= ~HD64465_PCCGCR_PCCT;  	/* memory-only card */
	hs_out(sp, v, GCR);

	v = hs_in(sp, GCR);
	v |= HD64465_PCCGCR_PDRV;   	/* enable outputs to card */
	hs_out(sp, v, GCR);

	v = hs_in(sp, GCR);
	v |= HD64465_PCCGCR_PMMOD; 	/* 16MB mapping mode */
	hs_out(sp, v, GCR);

	v = hs_in(sp, GCR);
	/* lowest 16MB of Common */
	v &= ~(HD64465_PCCGCR_PPA25|HD64465_PCCGCR_PPA24); 
	hs_out(sp, v, GCR);
	
	hs_reset_socket(sp, 1);

    	return 0;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

#if 0
	/* TODO: Here we can put checks for driver-writer abuse...  */

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
	   
		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
Example #12
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map objects in the low 512mb of address space using KSEG1, otherwise
	 * map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
		return (void *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
	   
		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}
Example #13
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
	   
		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
Example #14
/*
 * Re-map an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access physical
 * memory directly.
 */
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	unsigned long addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/*
	 * Check if we can simply use the P4 segment. This area is
	 * uncacheable, so if caching/buffering is requested, we can't
	 * use it.
	 */
	if ((phys_addr >= P4SEG) && (flags == 0))
		return (void __iomem *)phys_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * XXX: When mapping regular RAM, we'd better make damn sure
	 * it's never used for anything else.  But this is really the
	 * caller's responsibility...
	 */
	if (PHYSADDR(P2SEGADDR(phys_addr)) == phys_addr)
		return (void __iomem *)P2SEGADDR(phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	prot = __pgprot(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_RW | _PAGE_DIRTY
			| _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (unsigned long )area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}
Example #15
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See <asm/pgtable.h> for more information.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
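Callers of __arm_ioremap_pfn() are expected to split the physical address into a page frame number and an in-page offset themselves; a minimal wrapper sketch follows (the wrapper name is illustrative).

/*
 * Sketch of a wrapper: split a physical address into pfn + offset and
 * let __arm_ioremap_pfn() build the mapping.
 */
static void __iomem *example_ioremap(unsigned long phys_addr, size_t size,
				     unsigned int mtype)
{
	unsigned long pfn = __phys_to_pfn(phys_addr);
	unsigned long offset = phys_addr & ~PAGE_MASK;

	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}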
static void* MV_SHM_ioremap(unsigned long phys_start, unsigned long size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	area->phys_addr = phys_start;
	if (ioremap_page_range((unsigned long)area->addr,
		(unsigned long)area->addr + size, phys_start, PAGE_KERNEL)) {
		free_vm_area(area);
		return NULL;
	}
	return area->addr;
}
Example #17
void * vmalloc_prot(unsigned long size, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || size > (max_mapnr << PAGE_SHIFT))
		return NULL;
	area = get_vm_area(size);
	if (!area)
		return NULL;
	addr = area->addr;
	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, prot)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
Example #19
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	unsigned long addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Custom region addresses are accessible and uncached by default. */
	if (phys_addr >= LINSYSCUSTOM_BASE &&
	    phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
		return (__force void __iomem *) phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
	prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
			_PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
			flags);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (unsigned long) area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		vunmap((void *) addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
Example #20
struct vm_struct *alloc_vm_area(unsigned long size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
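The callback 'f' passed to apply_to_page_range() above only has to exist so that the walk allocates the intermediate page-table levels; a minimal sketch of such a callback, assuming the older pte_fn_t signature:

/*
 * Sketch of the callback: by the time this runs, apply_to_page_range()
 * has already allocated the page-table levels, so there is nothing to do.
 */
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	return 0;
}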
Example #21
void *__vmalloc(unsigned long size, int gfp_mask, unsigned long prot)
{
	/* Get free virtual space from the [VMALLOC_START, VMALLOC_END] range */
	struct vm_struct *area = get_vm_area(size, VM_ALLOC);
	struct page **pages;
	unsigned int nr_pages, array_size;
	unsigned long i;

	if (!area)
		return NULL;

	nr_pages = size >> PAGE_SHIFT;
	array_size = nr_pages * sizeof(struct page *);

	pages = kmalloc(array_size, gfp_mask);
	if (!pages)
		goto fail;
	memset(pages, 0, array_size);
	area->nr_pages = nr_pages;
	area->pages = pages;

	for (i = 0; i < nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (!area->pages[i]) {
			area->nr_pages = i;	/* free only what was allocated */
			goto fail;
		}
	}

	/* Wire the freshly allocated pages into the area's page tables */
	if (map_vm_area(area, __pgprot(prot), &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}
Example #22
/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
Example #23
/*
 * ioremap with access flags
 * Cache-wise it is the same as ioremap: the mapping is "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses
 * in the ARC hardware uncached region, this one still goes through the MMU,
 * since the caller might need finer access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	void __iomem *vaddr;
	struct vm_struct *area;
	unsigned long off, end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound, zero size */
	end = paddr + size - 1;
	if ((!size) || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - paddr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (void __iomem *)area->addr;
	if (ioremap_page_range((unsigned long)vaddr,
			       (unsigned long)vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
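Since the point of this ARC variant is finer R/W/X control, here is a heavily hedged call-site sketch; the _PAGE_PRESENT and _PAGE_READ bit names are assumptions about the ARC page-table headers, and the address and size are invented.

/*
 * Hypothetical caller: map a peripheral's status page without write
 * permission (and, as always with this helper, uncached).
 */
static void __iomem *example_map_readonly(void)
{
	return ioremap_prot(0xf0001000, PAGE_SIZE, _PAGE_PRESENT | _PAGE_READ);
}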
Example #24
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void *__ioremap(unsigned long phys_addr, unsigned long size,
		unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
			  _PAGE_WRITE    | _PAGE_DIRTY  |
			  _PAGE_ACCESSED | _PAGE_SHARED | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
Example #25
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;

	if (phys_addr < virt_to_phys(high_memory))
		return phys_to_virt(phys_addr);
	if (phys_addr & ~PAGE_MASK)
		return NULL;
	size = PAGE_ALIGN(size);
	if (!size || size > phys_addr + size)
		return NULL;
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}
Example #26
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;

	if (phys_addr < virt_to_phys(high_memory))
		return phys_to_virt(phys_addr);
	if (phys_addr & ~PAGE_MASK)
		return NULL;
	size = PAGE_ALIGN(size);
	if (!size || size > phys_addr + size)
		return NULL;
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, __pgprot(flags))) {
		vfree(addr);
		return NULL;
	}
	return addr;
}
Example #27
static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}
Example #28
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */

	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
Example #29
void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
    void __iomem * addr;
    struct vm_struct * area;
    unsigned long offset, last_addr;
    pgprot_t pgprot;

    /* Don't allow wraparound or zero size */
    last_addr = phys_addr + size - 1;
    if (!size || last_addr < phys_addr)
        return NULL;

    /*
     * Map objects in the low 512mb of address space using KSEG1, otherwise
     * map using page tables.
     */
    if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
        return (void __iomem *) KSEG1ADDR(phys_addr);

    /*
     * Don't allow anybody to remap normal RAM that we're using..
     */
    if (phys_addr < virt_to_phys(high_memory)) {
        char *t_addr, *t_end;
        struct page *page;

        t_addr = __va(phys_addr);
        t_end = t_addr + (size - 1);

        for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
            if(!PageReserved(page))
                return NULL;
    }

    pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
              | _PAGE_WRITE | flags);

    /*
     * Mappings have to be page-aligned
     */
    offset = phys_addr & ~PAGE_MASK;
    phys_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - phys_addr;

    /*
     * Ok, go for it..
     */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    area->phys_addr = phys_addr;
    addr = (void __iomem *) area->addr;
    if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                   phys_addr, pgprot)) {
        vunmap((void __force *) addr);
        return NULL;
    }

    return (void __iomem *) (offset + (char __iomem *)addr);
}
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory))) {
		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
		       __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == 0)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}