/**
 * Maps the allocation into ring-0.
 *
 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
 *
 * Contiguous mappings that aren't in 'high' memory will already be mapped into
 * kernel space, so we'll use that mapping if possible. If execute access is
 * required, we'll play it safe and do our own mapping.
 *
 * @returns IPRT status code.
 * @param   pMemLnx     The linux memory object to map.
 * @param   fExecutable Whether execute access is required.
 */
static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
{
    int rc = VINF_SUCCESS;

    /*
     * Choose mapping strategy.
     */
    bool fMustMap = fExecutable
                 || !pMemLnx->fContiguous;
    if (!fMustMap)
    {
        size_t iPage = pMemLnx->cPages;
        while (iPage-- > 0)
            if (PageHighMem(pMemLnx->apPages[iPage]))
            {
                fMustMap = true;
                break;
            }
    }

    Assert(!pMemLnx->Core.pv);
    Assert(!pMemLnx->fMappedToRing0);

    if (fMustMap)
    {
        /*
         * Use vmap - 2.4.22 and later.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        pgprot_t fPg;
        pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
# ifdef _PAGE_NX
        if (!fExecutable)
            pgprot_val(fPg) |= _PAGE_NX;
# endif

# ifdef VM_MAP
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
# else
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
# endif
        if (pMemLnx->Core.pv)
            pMemLnx->fMappedToRing0 = true;
        else
            rc = VERR_MAP_FAILED;
#else   /* < 2.4.22 */
        rc = VERR_NOT_SUPPORTED;
#endif
    }
    else
    {
        /*
         * Use the kernel RAM mapping.
         */
        pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
        Assert(pMemLnx->Core.pv);
    }

    return rc;
}
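/*
 * Companion sketch (assumption, not taken from the source above): undoing the
 * ring-0 mapping. Only a vmap()'ed mapping needs an explicit vunmap(); the
 * kernel RAM mapping obtained via phys_to_virt() has no teardown step. The
 * function name is hypothetical.
 */
static void rtR0MemObjLinuxVUnmapSketch(PRTR0MEMOBJLNX pMemLnx)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    if (pMemLnx->fMappedToRing0)
    {
        Assert(pMemLnx->Core.pv);
        vunmap(pMemLnx->Core.pv);       /* only the vmap()'ed case is unmapped */
        pMemLnx->fMappedToRing0 = false;
    }
#endif
    pMemLnx->Core.pv = NULL;
}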
Example #2
static int
safl_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
    unsigned long size;

    if (vma->vm_offset != 0)
        return -EINVAL;

    size = vma->vm_end - vma->vm_start;
    if (size > FLASH_SZ)
        return -EINVAL;

    pgprot_val(vma->vm_page_prot) = pgprot_noncached(pgprot_val(vma->vm_page_prot));

#if LINUX_VERSION_CODE >= 0x020100
    vma->vm_flags |= VM_IO;
#endif

    if (remap_page_range(vma->vm_start, flash_addr, size, vma->vm_page_prot))
        return -EAGAIN;

#if LINUX_VERSION_CODE < 0x020100
    vma->vm_inode = inode;
    inode->i_count++;
#endif

    return 0;
}
static struct agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
						unsigned long offset,
					    int size, pgprot_t page_prot)
{
	struct agp_segment_priv *seg;
	int num_segments, i;
	off_t pg_start;
	size_t pg_count;

	pg_start = offset / 4096;
	pg_count = size / 4096;
	seg = *(client->segments);
	num_segments = client->num_segments;

	for (i = 0; i < client->num_segments; i++) {
		if ((seg[i].pg_start == pg_start) &&
		    (seg[i].pg_count == pg_count) &&
		    (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
			return seg + i;
		}
	}

	return NULL;
}
Example #4
static int 
fb_mmap(struct inode *inode, struct file *file, struct vm_area_struct * vma)
{
	struct fb_ops *fb = registered_fb[GET_FB_IDX(inode->i_rdev)];
	struct fb_fix_screeninfo fix;

	if (! fb)
		return -ENODEV;
	fb->fb_get_fix(&fix, PROC_CONSOLE());
	if ((vma->vm_end - vma->vm_start + vma->vm_offset) > fix.smem_len)
		return -EINVAL;
	vma->vm_offset += fix.smem_start;
	if (vma->vm_offset & ~PAGE_MASK)
		return -ENXIO;
	if (m68k_is040or060) {
		pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
		/* Use write-through cache mode */
		pgprot_val(vma->vm_page_prot) |= _PAGE_CACHE040W;
	}
	if (remap_page_range(vma->vm_start, vma->vm_offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	vma->vm_inode = inode;
	inode->i_count++;
	return 0;
}
Example #5
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);
	if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
		return -EINVAL;

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #6
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		set_pte(pte + pte_index(restore_jump_address),
		__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
Example #7
/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PROT_DEVICE_nGnRE;

	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
		/*
		 * If the region is not aligned to the page size of the OS, we
		 * can not use strict permissions, since that would also affect
		 * the mapping attributes of the adjacent regions.
		 */
		return pgprot_val(PAGE_KERNEL_EXEC);

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}
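/*
 * Usage sketch (assumption): the pteval_t computed above is wrapped back into
 * a pgprot_t with __pgprot() before being handed to whatever routine builds
 * the EFI runtime mapping. efi_map_region_sketch() and map_md_region() are
 * hypothetical names standing in for the arch-specific plumbing.
 */
static __init int efi_map_region_sketch(efi_memory_desc_t *md)
{
	pgprot_t prot = __pgprot(create_mapping_protection(md));

	/* map_md_region() is a placeholder, not a real kernel helper */
	return map_md_region(md->phys_addr, md->virt_addr,
			     md->num_pages << EFI_PAGE_SHIFT, prot);
}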
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
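/*
 * Usage sketch (assumption, mirroring how the helper above is typically
 * wrapped): the public set_memory_*() entry points just choose which bits to
 * set and which to clear. PTE_RDONLY already appears in the rodata_full check
 * above; PTE_WRITE is the assumed arm64 counterpart being cleared here.
 */
static int set_memory_ro_sketch(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
				    __pgprot(PTE_RDONLY),	/* set mask   */
				    __pgprot(PTE_WRITE));	/* clear mask */
}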
Example #9
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, int level)
{
	pgprotval_t prot, cur;
	static const char units[] = "KMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
	cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		/*
		 * Now print the actual finished series
		 */
		seq_printf(m, "0x%0*lx-0x%0*lx   ",
			   width, st->start_address,
			   width, st->current_address);

		delta = (st->current_address - st->start_address) >> 10;
		while (!(delta & 1023) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		printk_prot(m, st->current_prot, st->level);

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}
Example #10
static int cramfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long address, length;
	struct inode *inode = file->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;

	/* this is only used in the case of read-only maps for XIP */

	if (vma->vm_flags & VM_WRITE)
		return generic_file_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	address  = PAGE_ALIGN(sb->CRAMFS_SB_LINEAR_PHYS_ADDR + OFFSET(inode));
	address += vma->vm_pgoff << PAGE_SHIFT;

	length = vma->vm_end - vma->vm_start;

	if (length > inode->i_size)
		length = inode->i_size;

	length = PAGE_ALIGN(length);


#if 0
	/* Doing the following makes it slower and more broken.  bdl */
	/*
	 * Accessing memory above the top the kernel knows about or
	 * through a file pointer that was marked O_SYNC will be
	 * done non-cached.
	 */
	vma->vm_page_prot =
		__pgprot((pgprot_val(vma->vm_page_prot) & ~_CACHE_MASK)
			| _CACHE_UNCACHED);
#endif

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	vma->vm_flags |= VM_IO;
	flush_tlb_page(vma, address);
	if (remap_page_range(vma->vm_start, address, length,
			     vma->vm_page_prot))
		return -EAGAIN;

#ifdef DEBUG_CRAMFS_XIP
	printk("cramfs_mmap: mapped %s at 0x%08lx, length %lu to vma 0x%08lx"
		", page_prot 0x%08lx\n",
		file->f_dentry->d_name.name, address, length,
		vma->vm_start, pgprot_val(vma->vm_page_prot));
#endif

	return 0;
}
Example #11
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;
		
		entry = mk_pte_io(offset, prot, space);
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte_val(entry) += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
Example #12
static int __init setup_areas(struct spu *spu)
{
	struct table {char* name; unsigned long addr; unsigned long size;};
	unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));

	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
					   sizeof(struct spe_shadow),
					   shadow_flags);
	if (!spu_pdata(spu)->shadow) {
		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
		LS_SIZE, pgprot_val(pgprot_noncached_wc(__pgprot(0))));

	if (!spu->local_store) {
		pr_debug("%s:%d: ioremap local_store failed\n",
			__func__, __LINE__);
		goto fail_ioremap;
	}

	spu->problem = ioremap(spu->problem_phys,
		sizeof(struct spu_problem));

	if (!spu->problem) {
		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
		sizeof(struct spu_priv2));

	if (!spu->priv2) {
		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
		spu->problem_phys, spu->local_store_phys,
		spu_pdata(spu)->shadow_addr);
	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
		(unsigned long)spu->problem, (unsigned long)spu->local_store,
		(unsigned long)spu_pdata(spu)->shadow);

	return 0;

fail_ioremap:
	spu_unmap(spu);

	return -ENOMEM;
}
static int
__change_page_attr(struct page *page, pgprot_t prot)
{ 
	pte_t *kpte; 
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
			set_pte_atomic(kpte, mk_pte(page, prot)); 
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
} 
Example #14
/* Remap IO memory, the same way as remap_page_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 *
 * As a special hack if the lowest bit of offset is set the
 * side-effect bit will be turned off.  This is used as a
 * performance improvement on FFB/AFB. -DaveM
 */
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage;
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;
		
		entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		if (offset & 0x1UL)
			pte_val(entry) &= ~(_PAGE_E);
		do {
			oldpage = *pte;
			pte_clear(pte);
			set_pte(pte, entry);
			forget_pte(oldpage);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
Example #15
/*
 * Map 'pfn' using fixed map 'type' and protections 'prot'
 */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
		prot = PAGE_KERNEL_UC_MINUS;

	return kmap_atomic_prot_pfn(pfn, type, prot);
}
Example #16
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "pud", "pmd", "pte" };

	if (!pgprot_val(prot)) {
		/* Not present */
		seq_printf(m, "                          ");
	} else {
		if (pr & _PAGE_USER)
			seq_printf(m, "USR ");
		else
			seq_printf(m, "    ");
		if (pr & _PAGE_RW)
			seq_printf(m, "RW ");
		else
			seq_printf(m, "ro ");
		if (pr & _PAGE_PWT)
			seq_printf(m, "PWT ");
		else
			seq_printf(m, "    ");
		if (pr & _PAGE_PCD)
			seq_printf(m, "PCD ");
		else
			seq_printf(m, "    ");

		/* Bit 9 has a different meaning on level 3 vs 4 */
		if (level <= 3) {
			if (pr & _PAGE_PSE)
				seq_printf(m, "PSE ");
			else
				seq_printf(m, "    ");
		} else {
			if (pr & _PAGE_PAT)
				seq_printf(m, "pat ");
			else
				seq_printf(m, "    ");
		}
		if (pr & _PAGE_GLOBAL)
			seq_printf(m, "GLB ");
		else
			seq_printf(m, "    ");
		if (pr & _PAGE_NX)
			seq_printf(m, "NX ");
		else
			seq_printf(m, "x  ");
	}
	seq_printf(m, "%s\n", level_name[level]);
}
/*
 * Create a virtual address mapping for physical pages of memory.
 *
 * This needs to handle requests for both the EMGD display driver
 * and the IMG 2D/3D drivers.
 *
 * If the page offset falls below the 256MB limit for display,
 * then map display memory. If above, route to the IMG handler.
 */
int emgd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	drm_emgd_priv_t *emgd_priv;
	gmm_chunk_t *chunk;
	unsigned long offset;

	/*
	 * re-direct offsets beyond the 256MB display range to PVRMMap
	 */
	if (vma->vm_pgoff > DRM_PSB_FILE_PAGE_OFFSET) {
		EMGD_DEBUG("emgd_mmap: Calling PVRMMap().");
		return PVRMMap(filp, vma);
	}

	file_priv = (struct drm_file *) filp->private_data;
	emgd_priv = (drm_emgd_priv_t *)file_priv->minor->dev->dev_private;
	offset = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Look up the buffer in the gmm chunk list based on offset
	 * and size.
	 */
	/* chunk = emgd_priv->context->dispatch->gmm_get_chunk(vma->vm_pgoff);*/
	chunk = gmm_get_chunk(emgd_priv->context, offset);
	if (chunk == NULL) {
		printk(KERN_ERR "emgd_mmap: Failed to find memory at 0x%lx.", offset);
	}

	/*
	 * Fill in the vma
	 */
	vma->vm_ops = &emgd_vm_ops;
	vma->vm_private_data = chunk;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
#else
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
	pgprot_val(vma->vm_page_prot) =
		pgprot_val(vma->vm_page_prot) | _PAGE_CACHE_MODE_UC_MINUS;
#else
	pgprot_val(vma->vm_page_prot) =
		pgprot_val(vma->vm_page_prot) | _PAGE_CACHE_UC_MINUS;
#endif

	return 0;
}
Example #18
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "pud", "pmd", "pte" };

	if (!pgprot_val(prot)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 3 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 4 && pr & _PAGE_PAT) ||
		    ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
Example #19
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    pgd = swapper_pg_dir + pgd_index(vaddr);
    if (pgd_none(*pgd)) {
        BUG();
        return;
    }
    pud = pud_offset(pgd, vaddr);
    if (pud_none(*pud)) {
        BUG();
        return;
    }
    pmd = pmd_offset(pud, vaddr);
    if (pmd_none(*pmd)) {
        BUG();
        return;
    }
    pte = pte_offset_kernel(pmd, vaddr);
    if (pgprot_val(flags))
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));
    else
        pte_clear(&init_mm, vaddr, pte);

    /*
     * It's enough to flush this one mapping.
     * (PGE mappings get flushed as well)
     */
    __flush_tlb_one(vaddr);
}
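/*
 * Caller sketch (assumption): fixmap setup is the typical user of
 * set_pte_pfn(), converting a fixmap index to its virtual address first.
 * The _sketch suffix marks this as illustrative, not the kernel's own code.
 */
void __set_fixmap_sketch(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
    unsigned long address = __fix_to_virt(idx);

    if (idx >= __end_of_fixed_addresses) {
        BUG();
        return;
    }
    set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}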
Example #20
static int _MDrv_VPool_MMap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long prot;
	//printk("mpool_base=%x\n",mpool_base);
	//printk("linux_base=%x\n",linux_base);

	size_t size = vma->vm_end - vma->vm_start;

	VPOOL_DPRINTK(printk("vpool map range 0x%08x~0x%08x\n", vma->vm_pgoff<<PAGE_SHIFT, size));

	if (!valid_video_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot);
	prot = (prot & ~_CACHE_MASK) | _CACHE_CACHABLE_NONCOHERENT;
	vma->vm_page_prot = __pgprot(prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}

	return 0;

}
Example #21
/*
 * This should probably be per-architecture in <asm/pgtable.h>
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

#if defined(__i386__) || defined(__x86_64__)
	/* On PPro and successors, PCD alone doesn't always mean 
	    uncached because of interactions with the MTRRs. PCD | PWT
	    means definitely uncached. */ 
	if (boot_cpu_data.x86 > 3)
		prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#ifdef SUN3_PAGE_NOCACHE
	if (MMU_IS_SUN3)
		prot |= SUN3_PAGE_NOCACHE;
	else
#endif
	if (MMU_IS_851 || MMU_IS_030)
		prot |= _PAGE_NOCACHE030;
	/* Use no-cache mode, serialized */
	else if (MMU_IS_040 || MMU_IS_060)
		prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#endif

	return __pgprot(prot);
}
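/*
 * Typical use (sketch, assumption): a character-device mmap handler marks the
 * whole VMA uncached with the helper above before remapping physical pages
 * into it. sketch_uncached_mmap() is a hypothetical driver entry point.
 */
static int sketch_uncached_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}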
Example #22
static int __init lboxre2_devices_setup(void)
{
	u32 cf0_io_base;	/* Boot CF base address */
	pgprot_t prot;
	unsigned long paddrbase, psize;

	/* open I/O area window */
	paddrbase = virt_to_phys((void*)PA_AREA5_IO);
	psize = PAGE_SIZE;
	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
	cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot));
	if (!cf0_io_base) {
		printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ );
		return -ENOMEM;
	}

	cf_ide_resources[0].start += cf0_io_base ;
	cf_ide_resources[0].end   += cf0_io_base ;
	cf_ide_resources[1].start += cf0_io_base ;
	cf_ide_resources[1].end   += cf0_io_base ;

	return platform_add_devices(lboxre2_devices,
			ARRAY_SIZE(lboxre2_devices));

}
Example #23
static int vperfctr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct vperfctr *perfctr;

#ifdef CONFIG_ARM
#define _PAGE_RW	L_PTE_WRITE
#endif
	/* Only allow read-only mapping of first page. */
	if ((vma->vm_end - vma->vm_start) != PAGE_SIZE ||
	    vma->vm_pgoff != 0 ||
	    (pgprot_val(vma->vm_page_prot) & _PAGE_RW) ||
	    (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
		return -EPERM;
	perfctr = filp->private_data;
	if (!perfctr)
		return -EPERM;
	/* 2.6.29-rc1 changed arch/x86/mm/pat.c to WARN_ON when
	   remap_pfn_range() is applied to plain RAM pages.
	   Comments there indicate that one should set_memory_wc()
	   before the remap, but that doesn't silence the WARN_ON.
	   Luckily vm_insert_page() works without complaints. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
	return vm_insert_page(vma, vma->vm_start, virt_to_page((unsigned long)perfctr));
#else
	return remap_pfn_range(vma, vma->vm_start,
			       virt_to_phys(perfctr) >> PAGE_SHIFT,
			       PAGE_SIZE, vma->vm_page_prot);
#endif
}
Example #24
static int __init landisk_devices_setup(void)
{
	pgprot_t prot;
	unsigned long paddrbase;
	void *cf_ide_base;

	/* open I/O area window */
	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
	cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
	if (!cf_ide_base) {
		printk("allocate_cf_area : can't open CF I/O window!\n");
		return -ENOMEM;
	}

	/* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */
	cf_ide_resources[0].start = (unsigned long)cf_ide_base + 0x40;
	cf_ide_resources[0].end   = (unsigned long)cf_ide_base + 0x40 + 0x0f;
	cf_ide_resources[0].flags = IORESOURCE_IO;
	cf_ide_resources[1].start = (unsigned long)cf_ide_base + 0x2c;
	cf_ide_resources[1].end   = (unsigned long)cf_ide_base + 0x2c + 0x03;
	cf_ide_resources[1].flags = IORESOURCE_IO;
	cf_ide_resources[2].start = IRQ_FATA;
	cf_ide_resources[2].flags = IORESOURCE_IRQ;

	return platform_add_devices(landisk_devices,
				    ARRAY_SIZE(landisk_devices));
}
Example #25
/*!
 * V4L2 interface - mmap function
 *
 * @param file          structure file *
 *
 * @param vma           structure vm_area_struct *
 *
 * @return status       0 Success, EINTR busy lock error,
 *                      ENOBUFS remap_page error
 */
static int mxc_v4l2out_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *dev = file->private_data;
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	int res = 0;
	vout_data *vout = video_get_drvdata(dev);

	/* make this _really_ smp-safe */
	if (down_interruptible(&vout->busy_lock))
		return -EINTR;

	/* make buffers write-thru cacheable */
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) &
				     ~L_PTE_BUFFERABLE);

	if (remap_pfn_range(vma, start, vma->vm_pgoff, size, vma->vm_page_prot)) {
		pr_debug("mxc_mmap(V4L)i - remap_pfn_range failed\n");
		res = -ENOBUFS;
		goto mxc_mmap_exit;
	}

      mxc_mmap_exit:
	up(&vout->busy_lock);
	return res;
}
/**
 * Converts from RTMEM_PROT_* to Linux PAGE_*.
 *
 * @returns Linux page protection constant.
 * @param   fProt       The IPRT protection mask.
 * @param   fKernel     Whether it applies to kernel or user space.
 */
static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
{
    switch (fProt)
    {
        default:
            AssertMsgFailed(("%#x %d\n", fProt, fKernel));
        case RTMEM_PROT_NONE:
            return PAGE_NONE;

        case RTMEM_PROT_READ:
            return fKernel ? PAGE_KERNEL_RO         : PAGE_READONLY;

        case RTMEM_PROT_WRITE:
        case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
            return fKernel ? PAGE_KERNEL            : PAGE_SHARED;

        case RTMEM_PROT_EXEC:
        case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
            if (fKernel)
            {
                pgprot_t fPg = MY_PAGE_KERNEL_EXEC;
                pgprot_val(fPg) &= ~_PAGE_RW;
                return fPg;
            }
            return PAGE_READONLY_EXEC;
#else
            return fKernel ? MY_PAGE_KERNEL_EXEC    : PAGE_READONLY_EXEC;
#endif

        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_READ:
            return fKernel ? MY_PAGE_KERNEL_EXEC    : PAGE_SHARED_EXEC;
    }
}
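/*
 * Usage sketch (assumption, name hypothetical): converting an IPRT protection
 * mask once and feeding the resulting pgprot_t straight to vmap(), the same
 * way rtR0MemObjLinuxVMap() earlier in this collection builds its mapping.
 */
static void *rtR0MemObjLinuxMapProtSketch(struct page **papPages, size_t cPages, unsigned fProt)
{
    pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* fKernel */);
#ifdef VM_MAP
    return vmap(papPages, cPages, VM_MAP, fPg);
#else
    return vmap(papPages, cPages, VM_ALLOC, fPg);
#endif
}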
Example #27
static int sram_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    unsigned long size = vma->vm_end - vma->vm_start;
    int reg_num = iminor(file->f_path.dentry->d_inode);

    dev_dbg(dev, "sram_mmap: rdev major = %d, rdev minor = %d\n",
            imajor(file->f_path.dentry->d_inode), reg_num);

    if (offset + size > region_table[reg_num].size)
        return -EINVAL;

    offset += region_table[reg_num].start_phys;

    /* here we have to set the "right" value */
    vma->vm_pgoff = offset >> PAGE_SHIFT;
    pgprot_val( vma->vm_page_prot ) |= ( _PAGE_USER );

    /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
    if (remap_pfn_range(vma,
                        vma->vm_start,
                        vma->vm_pgoff,
                        size,
                        vma->vm_page_prot))
        return -EAGAIN;

    return 0;
}
Example #28
static int viadev_mmap(struct file* filp, struct vm_area_struct* vma)
{
    unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
    unsigned long phy = di.io_base_phy + off;
    unsigned long vsize = vma->vm_end - vma->vm_start;
    unsigned long psize = di.size - off;

    if (vsize > psize) return -EINVAL;

    vma->vm_pgoff = phy >> PAGE_SHIFT;

    if (boot_cpu_data.x86 > 3)
        pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;

    vma->vm_flags |= VM_IO | VM_RESERVED | VM_DONTEXPAND;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
    if (io_remap_page_range(vma,vma->vm_start, phy, vsize, vma->vm_page_prot))
       return -EAGAIN;
#else
    if (io_remap_page_range(vma->vm_start, phy, vsize, vma->vm_page_prot))
       return -EAGAIN;
#endif

    return 0;
}
Example #29
/*
 * This should probably be per-architecture in <asm/pgtable.h>
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

#if defined(__i386__)
	/* On PPro and successors, PCD alone doesn't always mean 
	    uncached because of interactions with the MTRRs. PCD | PWT
	    means definitely uncached. */ 
	if (boot_cpu_data.x86 > 3)
		prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
	if (MMU_IS_SUN3)
		prot |= SUN3_PAGE_NOCACHE;
	else if (MMU_IS_851 || MMU_IS_030)
		prot |= _PAGE_NOCACHE030;
	/* Use no-cache mode, serialized */
	else if (MMU_IS_040 || MMU_IS_060)
		prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#elif defined(__mips__)
	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
#elif defined(__arm__) && defined(CONFIG_CPU_32)
	/* Turn off caching for all I/O areas */
	prot &= ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE);
#endif

	return __pgprot(prot);
}
Example #30
static int
__change_page_attr(struct page *page, pgprot_t prot)
{ 
	pte_t *kpte; 
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
			set_pte_atomic(kpte, mk_pte(page, prot)); 
		} else {
			struct page *split = split_large_page(address, prot); 
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
			kpte_page = split;
		}	
		get_page(kpte_page);
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		__put_page(kpte_page);
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		/* memleak and potential failed 2M page regeneration */
		BUG_ON(!page_count(kpte_page));

		if (cpu_has_pse && (page_count(kpte_page) == 1)) {
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
}