void *fmem_map_virtual_area(int cacheability)
{
	unsigned long addr;
	const struct mem_type *type;
	int ret;

	addr = (unsigned long) fmem_data.area->addr;
	type = get_mem_type(cacheability);
	ret = ioremap_page_range(addr, addr + fmem_data.size,
			fmem_data.phys, __pgprot(type->prot_pte));
	if (ret)
		return ERR_PTR(ret);

	fmem_data.virt = fmem_data.area->addr;

	return fmem_data.virt;
}
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);

	if (!flags)
		flags = 1; /* 1 = CB0-1 device */

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
Example #3
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
					unsigned long flags)
{
	int err = 0;
	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page because the
		 * phys addr can be in a hole between nodes and not have a
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages,
					    __pgprot(__PAGE_KERNEL | flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		mutex_debug_check_no_locks_freed(page_address(page),
						 numpages * PAGE_SIZE);

	/* the return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_all();
}
Example #5
void __init efi_pagetable_init(void)
{
	efi_memory_desc_t *md;
	unsigned long size;
	u64 start_pfn, end_pfn, pfn, vaddr;
	void *p;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	memset(efi_pgd, 0, sizeof(efi_pgd));

	dmi_check_system(efi_quirk_table);

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		start_pfn = md->phys_addr >> PAGE_SHIFT;
		size = md->num_pages << EFI_PAGE_SHIFT;
		end_pfn = PFN_UP(md->phys_addr + size);

		for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
			unsigned long val;

			vaddr = pfn << PAGE_SHIFT;
			pgd = efi_pgd + pgd_index(vaddr);
			pud = fill_pud(pgd, vaddr);
			pmd = fill_pmd(pud, vaddr);
			pte = fill_pte(pmd, vaddr);
			if (md->type == EFI_RUNTIME_SERVICES_CODE)
				val = __PAGE_KERNEL_EXEC;
			else
				val = __PAGE_KERNEL;

			if (!(md->attribute & EFI_MEMORY_WB))
				val |= _PAGE_CACHE_UC_MINUS;
			set_pte(pte, pfn_pte(pfn, __pgprot(val)));
		}
	}
	clone_pgd_range(efi_pgd + KERNEL_PGD_BOUNDARY,
	                swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS);
	hotplug_memory_notifier(efi_memory_callback, EFI_CALLBACK_PRI);
}
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
			"system memory.  This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release. Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #7
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
                                  unsigned long phys_addr, unsigned long flags)
{
    unsigned long end;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    do {
        if (!pte_none(*pte))
            printk("remap_area_pte: page already exists\n");
        set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT |
                                 _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
        address += PAGE_SIZE;
        phys_addr += PAGE_SIZE;
        pte++;
    } while (address < end);
}
Example #8
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	unsigned long addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Custom region addresses are accessible and uncached by default. */
	if (phys_addr >= LINSYSCUSTOM_BASE &&
	    phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
		return (__force void __iomem *) phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
	prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
			_PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
			flags);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (unsigned long) area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		vunmap((void *) addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
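The NOTE above promises that a caller may pass a non-page-aligned physical address and get back a cookie that already carries the sub-page offset. A minimal, hedged usage sketch of that contract follows; the register block address, size and offset are illustrative and not taken from the snippet.

#include <linux/io.h>

/* Hypothetical device window, deliberately not page-aligned. */
#define EXAMPLE_REG_BASE	0x02004004UL
#define EXAMPLE_REG_SIZE	0x40
#define EXAMPLE_STATUS_REG	0x10

static void __iomem *example_regs;

static int example_map_regs(void)
{
	/* __ioremap() page-aligns internally and folds the offset back
	 * into the returned pointer, so no extra arithmetic is needed. */
	example_regs = __ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE, 0);
	if (!example_regs)
		return -ENOMEM;

	(void)readl(example_regs + EXAMPLE_STATUS_REG);
	return 0;
}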
Example #9
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                            gfp_t flag, struct dma_attrs *attrs)
{
    struct page *page, **map;
    pgprot_t pgprot;
    void *addr;
    int i, order;

    pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);

    size = PAGE_ALIGN(size);
    order = get_order(size);

    page = alloc_pages(flag, order);
    if (!page)
        return NULL;

    *handle = page_to_phys(page);
    map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
    if (!map) {
        __free_pages(page, order);
        return NULL;
    }
    split_page(page, order);

    order = 1 << order;
    size >>= PAGE_SHIFT;
    map[0] = page;
    for (i = 1; i < size; i++)
        map[i] = page + i;
    for (; i < order; i++)
        __free_page(page + i);
    pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
    if (CPU_IS_040_OR_060)
        pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
    else
        pgprot_val(pgprot) |= _PAGE_NOCACHE030;
    addr = vmap(map, size, VM_MAP, pgprot);
    kfree(map);

    return addr;
}
Example #10
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

	if (pmd_none(*pmdp)) {
		pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
						      sizeof(pte_t));

		ptep += PTRS_PER_PTE;

		set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
	}
	ptep = pte_offset(pmdp, virt);

	set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
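A small sketch of the allocation layout the comment above describes; the helper name is hypothetical and it only restates what alloc_init_page() already does: one bootmem allocation holds both PTE_SIZE'd tables, and the pointer handed to set_pmd() is the start of the second half.

#include <linux/bootmem.h>
#include <asm/pgtable.h>

/* Hypothetical helper, for illustration only. */
static pte_t *example_pte_table_layout(void)
{
	/*
	 * One allocation, two tables back to back:
	 *   table 0: base                .. base + PTRS_PER_PTE - 1
	 *   table 1: base + PTRS_PER_PTE .. base + 2*PTRS_PER_PTE - 1
	 */
	pte_t *base = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));

	/* alloc_init_page() installs the second table into the pmd; per its
	 * comment, one table holds the Linux view of the PTEs and the other
	 * backs the hardware page-table walk. */
	return base + PTRS_PER_PTE;
}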
/**
 * The caller has to make sure that there is enough guard
 * vm area allocated, so that the alignment adjustment done here
 * does not overflow the vm area. Unlike ioremap, this function can't
 * take care of this, as the vm area is pre-allocated
 * by calling plat_get_vm_area.
 */
void __iomem *plat_ioremap_ns(unsigned long vaddr, unsigned long size,
		phys_addr_t phys_addr)
{
	unsigned long pfn;
	unsigned long offset;

	pfn = __phys_to_pfn(phys_addr);
	offset = phys_addr & ~PAGE_MASK;

	size = PAGE_ALIGN(offset + size);

	if (ioremap_page_range(vaddr, vaddr + size, __pfn_to_phys(pfn),
		__pgprot(PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
			L_PTE_SHARED))) {
			pr_err("ERROR: ns_ioremap failed\n");
			return (void __iomem *)NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
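The comment above puts the burden of sizing the guard vm area on the caller. A hedged sketch of that sizing rule, mirroring the arithmetic plat_ioremap_ns() performs; the helper name is made up for illustration.

#include <linux/mm.h>

/* Hypothetical helper: the number of bytes the pre-allocated vm area must
 * cover for a given physical range, matching the PAGE_ALIGN(offset + size)
 * that plat_ioremap_ns() maps starting at vaddr. */
static unsigned long example_ns_vm_area_len(phys_addr_t phys_addr,
					    unsigned long size)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;

	return PAGE_ALIGN(offset + size);
}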
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
#ifdef CONFIG_PAGE_SIZE_64K
	} while (pte += PTE_STEP, addr += PAGE_SIZE, addr != end);
#else /* #ifdef CONFIG_PAGE_SIZE_64K */
	} while (pte++, addr += PAGE_SIZE, addr != end);
#endif /* CONFIG_PAGE_SIZE_64K */
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
	return -EFAULT;
}
Example #13
int
map_page(unsigned long va, unsigned long pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc(&init_mm, pd, va);
	if (pg != 0) {
		err = 0;
		set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
		if (mem_init_done)
			flush_HPTE(0, va, pg);
	}
	spin_unlock(&init_mm.page_table_lock);
	return err;
}
Example #14
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/* Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	prot = pgprot_val(vma->vm_page_prot);
	vma->vm_page_prot = __pgprot(prot);

	/* Write-combine setting is ignored, it is changed via the mtrr
	 * interfaces on this platform.
	 */
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			     vma->vm_end - vma->vm_start,
			     vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #15
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		free_page(addr);
		totalram_pages++;
	}
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}
Example #16
/*
 * ioremap with access flags
 * Cache semantics wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses in
 * the ARC hardware uncached region, this one still goes through the MMU as the
 * caller might need finer access control (R/W/X)
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	void __iomem *vaddr;
	struct vm_struct *area;
	unsigned long off, end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound, zero size */
	end = paddr + size - 1;
	if ((!size) || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - paddr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (void __iomem *)area->addr;
	if (ioremap_page_range((unsigned long)vaddr,
			       (unsigned long)vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
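The comment above distinguishes ioremap_prot() from plain ioremap by the caller-chosen access permissions. A hedged usage sketch follows; the base address is illustrative, and the assumption that this architecture provides PAGE_KERNEL_RO for a read-only mapping is mine, not the snippet's.

#include <linux/io.h>
#include <linux/sizes.h>
#include <asm/pgtable.h>

/* Hypothetical peripheral base, for illustration only. */
#define EXAMPLE_PERIPH_BASE	0x90000000UL

static void __iomem *example_periph;

static int example_map_readonly(void)
{
	/* Assumption: PAGE_KERNEL_RO exists here; the point is that the
	 * R/W/X bits come from the caller, while ioremap_prot() still
	 * forces the mapping uncached. */
	example_periph = ioremap_prot(EXAMPLE_PERIPH_BASE, SZ_4K,
				      pgprot_val(PAGE_KERNEL_RO));
	if (!example_periph)
		return -ENOMEM;

	return 0;
}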
static void __init hisi_cma_dev_init(void)
{
	struct cma *cma;
	struct page *page = NULL;
	int i;
#ifdef CONFIG_HISI_KERNELDUMP
	int k;
	struct page *tmp_page = NULL;
#endif

	for (i = 0; i < hisi_cma_area_count; i++) {
		cma = hisi_cma_areas[i].cma_area;
		if (cma == NULL)
			continue;
		dev_set_cma_area(&hisi_cma_dev[i], cma);
		hisi_cma_areas[i].dev = &hisi_cma_dev[i];
		/* a dynamic value of 0 means the area is static */
		if (hisi_cma_areas[i].dynamic == 0) {

			page = dma_alloc_from_contiguous(&hisi_cma_dev[i], cma->count, SZ_1M);

#ifdef CONFIG_HISI_KERNELDUMP
			if (page != NULL) {
				tmp_page = page;
				for (k = 0; k < cma->count; k++) {
					SetPageMemDump(tmp_page);
					tmp_page++;
				}
			}
#endif
			if (hisi_cma_areas[i].sec_prot) {
				create_mapping_late(__pfn_to_phys(cma->base_pfn),
									__phys_to_virt(__pfn_to_phys(cma->base_pfn)),
									cma->count * PAGE_SIZE, __pgprot(PROT_DEVICE_nGnRE));
			}
			pr_err("%s:%d page addr 0x%llx size %lu\n", __func__,
					__LINE__, page_to_phys(page), (cma->count << PAGE_SHIFT) / SZ_1M);
		}
	}
}
Example #18
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void *__ioremap(unsigned long phys_addr, unsigned long size,
		unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
			  _PAGE_WRITE    | _PAGE_DIRTY  |
			  _PAGE_ACCESSED | _PAGE_SHARED | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	unsigned int offset = buffer->priv_phys - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buffer->priv_phys)
		return NULL;


	ret = ioremap_page_range(start, start + buffer->size,
			buffer->priv_phys, __pgprot(type->prot_pte));

	if (!ret)
		return (void *)start;
	else
		return NULL;
}
Example #20
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	/* See comment in mm/memory.c remap_pfn_range */
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	vma->vm_pgoff = phys_base >> PAGE_SHIFT;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	while (from < end) {
		pud_t *pud = pud_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}

	flush_tlb_range(vma, beg, end);
	return error;
}
Example #21
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));

	return 0;
}
Example #22
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;

	if (phys_addr < virt_to_phys(high_memory))
		return phys_to_virt(phys_addr);
	if (phys_addr & ~PAGE_MASK)
		return NULL;
	size = PAGE_ALIGN(size);
	if (!size || size > phys_addr + size)
		return NULL;
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, __pgprot(flags))) {
		vfree(addr);
		return NULL;
	}
	return addr;
}
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte)) {
			printk(KERN_CRIT "remap_area_pte: page already exists\n");
			BUG();
			return -EFAULT;
		}

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
			    type->prot_pte_ext);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
Example #24
static int remap_area_pages(unsigned long start, unsigned long pfn,
			    unsigned long size, unsigned long flags)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | flags);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	DEBUG_IOREMAP(("    %s: pte %x address %x size %x phys_addr %x\n", \
			__FUNCTION__,pte,address,size,phys_addr));

	do {
		if (!pte_none(*pte))
			printk("remap_area_pte: page already exists\n");
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT |
					_PAGE_READ | _PAGE_WRITE | 
					_PAGE_DIRTY | _PAGE_ACCESSED |_PAGE_SHARED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
Example #26
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long prot;
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	vma->vm_page_prot = phys_mem_access_prot(file, offset,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
#elif defined(pgprot_noncached)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	prot = pgprot_val(vma->vm_page_prot);
	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
	if (prot & _PAGE_WRITE)
		prot = prot | _PAGE_FILE | _PAGE_VALID | _PAGE_DIRTY;
	else
		prot = prot | _PAGE_FILE | _PAGE_VALID;
	prot &= ~_PAGE_PRESENT;
	vma->vm_page_prot = __pgprot(prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
Example #27
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
				  unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | __READABLE | 
							     __WRITEABLE | _PAGE_GLOBAL |
							     _PAGE_KERNEL | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
Example #29
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			ptep_invalidate(&init_mm, address, pte);
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}