Example no. 1
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
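A minimal caller sketch, mirroring the pattern used by the ARM ioremap examples further down (the helper name demo_ioremap and its prot argument are assumptions, not part of the examples): reserve a VM_IOREMAP area, then let ioremap_page_range() populate it; the flush_cache_vmap() for the new range is done inside the callee.

static void __iomem *demo_ioremap(phys_addr_t phys, size_t size, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	area = get_vm_area(size, VM_IOREMAP);	/* reserve kernel virtual space */
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr + size, phys, prot)) {
		vunmap((void *)addr);		/* release the area on failure */
		return NULL;
	}
	return (void __iomem *)addr;		/* mapping is already cache-flushed */
}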
Example no. 2
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
Example no. 3
static int
remap_area_pages(unsigned long start, unsigned long phys_addr,
		 unsigned long size, unsigned long flags)
{
	unsigned long address = start;
	unsigned long end = start + size;
	int err = 0;
	pgd_t * dir;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	BUG_ON(address >= end);
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap(start, end);
	return err;
}
Example no. 4
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
#ifndef CONFIG_SQUASHFS_DEBUGGER_AUTO_DIAGNOSE
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example no. 5
static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
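For context, a hedged illustration of the public counterpart: vmap() performs the same map-then-flush_cache_vmap() sequence through helpers like the one above, so the returned range can be touched immediately. The map_and_clear helper below is an assumption for illustration, not part of the example.

static void *map_and_clear(struct page **pages, unsigned int nr_pages)
{
	/* vmap() installs the pages and flushes the new kernel mapping */
	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

	if (va)
		memset(va, 0, (size_t)nr_pages * PAGE_SIZE);
	return va;
}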
Example no. 6
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be section aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped
	 */
	if (pfn_valid(pfn)) {
		WARN(1, "BUG: Your driver calls ioremap() on\n"
			"system memory.  This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release. Please fix your driver.\n");
		return NULL;
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example no. 7
int do_load_fw(const char *filename, unsigned long base_addr,
	       size_t mem_size)
{
	mm_segment_t oldfs = {0};
	ssize_t len;
	unsigned long file_sz;
	loff_t pos = 0;
	struct file *flp = NULL;
	unsigned long buf_ptr = base_addr;

    printk("loading u-boot:%s to %08lx....\n",
               filename, buf_ptr);

    flp = filp_open(filename, O_RDONLY, S_IRWXU);
    if(IS_ERR(flp)) {
        printk("loader: open file failed");
        return -1;
    }

	file_sz = vfs_llseek(flp, 0, SEEK_END);
	if (file_sz > mem_size) {
		printk("rtloader: bin file too big. "
		       "mem size: 0x%08zx, bin file size: 0x%08lx\n",
		       mem_size, file_sz);
		filp_close(flp, NULL);
		return -1;
	}
	printk("loader: bin file size: 0x%08lx\n", file_sz);
	vfs_llseek(flp, 0, SEEK_SET);

	oldfs = get_fs();
	set_fs(get_ds());
	while (file_sz > 0) {
		len = vfs_read(flp, (void __user __force *)buf_ptr, BUFF_SZ, &pos);
		if (len < 0) {
			pr_err("read %08lx error: %zd\n", buf_ptr, len);
			set_fs(oldfs);
			filp_close(flp, NULL);
			return -1;
		}
		file_sz -= len;
		buf_ptr += len;
	}
	set_fs(oldfs);

	printk("done!\n");
	
	flush_cache_vmap(base_addr, mem_size);

	return 0;
}
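A hypothetical caller sketch (UBOOT_PHYS, UBOOT_SIZE and the firmware path are assumptions, not taken from the example): the destination is expected to be a kernel virtual address backed by the target memory, for instance obtained with ioremap().

static int load_uboot(void)
{
	void __iomem *dst = ioremap(UBOOT_PHYS, UBOOT_SIZE);
	int ret;

	if (!dst)
		return -ENOMEM;
	/* the cast drops the __iomem annotation purely for this sketch */
	ret = do_load_fw("/lib/firmware/u-boot.bin", (unsigned long)dst, UBOOT_SIZE);
	iounmap(dst);
	return ret;
}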
Example no. 8
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See <asm/pgtable.h> for more information.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
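As the leading comment notes, callers may pass a non-page-aligned physical address by splitting it into a pfn and a sub-page offset; a hypothetical sketch (UART_PHYS and UART_SIZE are assumed constants, not from the example):

static void __iomem *map_uart(void)
{
	/* the returned cookie already includes the sub-page offset */
	return __arm_ioremap_pfn(__phys_to_pfn(UART_PHYS),
				 UART_PHYS & ~PAGE_MASK,
				 UART_SIZE, MT_DEVICE);
}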
Example no. 9
static void ion_clean_and_unmap(unsigned long vaddr, pte_t *ptep,
				size_t size, bool memory_zero)
{
	int i;

	flush_cache_vmap(vaddr, vaddr + size);

	if (memory_zero)
		memset((void *)vaddr, 0, size);

	dmac_flush_range((void *)vaddr, (void *)vaddr + size);

	for (i = 0; i < (size / PAGE_SIZE); i++)
		pte_clear(&init_mm, (void *)vaddr + (i * PAGE_SIZE), ptep + i);

	flush_cache_vunmap(vaddr, vaddr + size);
	flush_tlb_kernel_range(vaddr, vaddr + size);
}
Example no. 10
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long start, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(start, end);

	if (unlikely(err))
		return err;
	return nr;
}
Example no. 11
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mapping whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

#if 0 /* HACK - do allow RAM to be mapped, the problems are a bit overrated */
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example no. 12
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
Example no. 13
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
		       KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
		       KERN_WARNING "will fail in the next kernel release.  Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example no. 14
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
                                        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
    const struct mem_type *type;
    int err;
    unsigned long addr;
    struct vm_struct * area;

#ifndef CONFIG_ARM_LPAE
    if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
        return NULL;
#endif

    type = get_mem_type(mtype);
    if (!type)
        return NULL;

    size = PAGE_ALIGN(offset + size);

    read_lock(&vmlist_lock);
    for (area = vmlist; area; area = area->next) {
        if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
            break;
        if (!(area->flags & VM_ARM_STATIC_MAPPING))
            continue;
        if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
            continue;
        if (__phys_to_pfn(area->phys_addr) > pfn ||
                __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
            continue;

        read_unlock(&vmlist_lock);
        addr = (unsigned long)area->addr;
        addr += __pfn_to_phys(pfn) - area->phys_addr;
        return (void __iomem *) (offset + addr);
    }
    read_unlock(&vmlist_lock);

    if (WARN_ON(pfn_valid(pfn)))
        return NULL;

    area = get_vm_area_caller(size, VM_IOREMAP, caller);
    if (!area)
        return NULL;
    addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
    if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_supersections(addr, pfn, size, type);
    } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_sections(addr, pfn, size, type);
    } else
#endif
        err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                 __pgprot(type->prot_pte));

    if (err) {
        vunmap((void *)addr);
        return NULL;
    }

    flush_cache_vmap(addr, addr + size);
    return (void __iomem *) (offset + addr);
}
Example no. 15
static void pil_femto_modem_unmap_fw_mem(void *vaddr, size_t size, void *data)
{
    flush_cache_vmap((unsigned long) vaddr, (unsigned long) vaddr + size);
    iounmap(vaddr);
    isb();
}