Example #1
static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
{
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);

	/* Use upper 10 bits of VA to index the first level map */
	pge = pgd_offset_k(va);
	pue = pud_offset(pge, va);
	pme = pmd_offset(pue, va);

	/* Use middle 10 bits of VA to index the second-level map */
	pte = pte_alloc_kernel(&init_mm, pme, va);
	if (pte != 0) {
		err = 0;
		set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
	}

	spin_unlock(&init_mm.page_table_lock);
	return err;
}
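A minimal call-site sketch for the helper above, assuming hypothetical MY_DEV_PHYS/MY_DEV_VIRT constants; PAGE_KERNEL gives an ordinary cached kernel mapping:

/* Hypothetical addresses, for illustration only. */
#define MY_DEV_PHYS	0x80001000UL	/* physical page to expose */
#define MY_DEV_VIRT	0xf0000000UL	/* fixed kernel virtual slot */

static int __init my_dev_map(void)
{
	/* Returns 0 on success, -ENOMEM if a PTE page could not be allocated. */
	return map_page(MY_DEV_VIRT, MY_DEV_PHYS, PAGE_KERNEL);
}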
Example #2
/*
 * Note that this is intended to be called only from the copy_user_page
 * asm code; anything else will require special locking to prevent the
 * mini-cache space being re-used.  (Note: probably preempt unsafe).
 *
 * We rely on the fact that the minicache is 2K, and we'll be pushing
 * 4K of data through it, so we don't actually have to specifically
 * flush the minicache when we change the mapping.
 *
 * Note also: assert(PAGE_OFFSET <= virt < high_memory).
 * Unsafe: preempt, kmap.
 */
unsigned long map_page_minicache(unsigned long virt)
{
	set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
	cpu_tlb_invalidate_page(minicache_address, 0);

	return minicache_address;
}
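For context, a hedged C rendering of the only intended caller pattern (the real copy_user_page on this port is assembly; this sketch only illustrates the sequencing):

/* Sketch: map the source page through the minicache window, then copy.
 * Per the comment above, preemption must already be disabled, and pushing
 * 4K of data through the 2K minicache evicts every stale line. */
static void copy_user_page_sketch(void *to, unsigned long from_virt)
{
	void *from = (void *)map_page_minicache(from_virt);

	memcpy(to, from, PAGE_SIZE);
}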
Example #3
void kvm_arch_hardware_enable(void *garbage)
{
	long  status;
	long  tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		printk(KERN_WARNING "kvm: Failed to Enable VT Support!!!!\n");
		return;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO "kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);
}
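The insert-translation-register/purge pattern above recurs in examples #8 and #15; a hedged helper sketch of the same sequence, built only from the calls shown in this file (with_vmm_mapping is a hypothetical name):

/* Sketch: pin the VMM translation register around a callback, then
 * purge it again. */
static int with_vmm_mapping(void (*fn)(void))
{
	unsigned long pte, psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(psr);
	if (slot < 0)
		return slot;

	fn();

	ia64_ptr_entry(0x3, slot);
	return 0;
}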
Example #4
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
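		/* s390 maps kernel memory 1:1, so the physical address
		 * doubles as the virtual address for the table walk below. */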
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			__ptep_ipte(address, pte);
			pte_val(*pte) = _PAGE_TYPE_EMPTY;
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
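A hedged sketch of how the page allocator is expected to drive this hook (CONFIG_DEBUG_PAGEALLOC): unmap pages on free so stray accesses fault, remap on allocation. The call sites below are hypothetical:

static void debug_pagealloc_free_sketch(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 0);	/* invalidate the PTEs */
}

static void debug_pagealloc_alloc_sketch(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 1);	/* re-establish RW PTEs */
}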
Example #5
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	unsigned long flags = 1; /* 1 = CB0-1 device */

	DEBUG_IOREMAP(("shmedia_mapiopage pa %08x va %08x\n", pa, va));

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp)) {
		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
		if (pmdp == NULL)
			panic("No memory for pmd\n");
		memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
		if (ptep == NULL)
			panic("No memory for pte\n");
		clear_page((void *)ptep);
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	ptep = pte_offset(pmdp, va);
	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
			_PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags)));
}
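A hedged sketch of the natural caller: mapping a multi-page I/O window by invoking the helper above once per page (shmedia_ioremap_sketch is a hypothetical name, modelled on the port's real ioremap loop):

static void shmedia_ioremap_sketch(unsigned long pa, unsigned long va,
				   unsigned long len)
{
	unsigned long off;

	for (off = 0; off < len; off += PAGE_SIZE)
		shmedia_mapioaddr(pa + off, va + off);
}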
Example #6
/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	/* These mark extents of read-only kernel pages...
	 * ...from vmlinux.lds.S
	 */
	struct memblock_region *region;

	v = PAGE_OFFSET;

	for_each_memblock(memory, region) {
		p = (u32) region->base & PAGE_MASK;
		e = p + (u32) region->size;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;
			pue = pud_offset(pge, v);
			pme = pmd_offset(pue, v);

			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for "
				      "two-level page tables",
				     __func__);
			}

			/* Alloc one page for holding PTE's... */
			pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTE'S */
			for (j = 0; p < e && j < PTRS_PER_PGD;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       region->base, region->base + region->size);
	}
}
Example #7
/*
 * This contains the code to setup the memory map on an ARM2/ARM250/ARM3
 * machine. This is both processor & architecture specific, and requires
 * some more work to get it to fit into our separate processor and
 * architecture structure.
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	page_nr = max_low_pfn;

	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}
Example #8
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/* Insert a pair of TRs to map the VMM */
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/* Insert a pair of TRs to map the VM's data */
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
					pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;
	r = 0;
out:
	return r;
}
Example #9
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
				  unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (!pte_none(*pte))
			printk("remap_area_pte: page already exists\n");
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT |
					 _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address < end);
}
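For the bigger picture, a hedged sketch of the level above: the classic ioremap pmd walker that hands this pte-level helper one PMD-sized chunk at a time (reconstructed from the common 2.4-era pattern, not taken verbatim from this project):

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long size, unsigned long phys_addr,
				 unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}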
Example #10
File: mm-armv.c Project: hugh712/Jollen
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

	if (pmd_none(*pmdp)) {
		pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
						      sizeof(pte_t));

		ptep += PTRS_PER_PTE;

		set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
	}
	ptep = pte_offset(pmdp, virt);

	set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
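A hedged usage sketch: establishing a mapping for a whole region by calling alloc_init_page() once per page (alloc_init_region_sketch is a hypothetical helper; the real mm-armv.c drives this from a create_mapping() routine with section-sized shortcuts):

static void alloc_init_region_sketch(unsigned long virt, unsigned long phys,
				     unsigned long len, int domain, int prot)
{
	unsigned long off;

	for (off = 0; off < len; off += PAGE_SIZE)
		alloc_init_page(virt + off, phys + off, domain, prot);
}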
Example #11
int
map_page(unsigned long va, unsigned long pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc(&init_mm, pd, va);
	if (pg != 0) {
		err = 0;
		set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
		if (mem_init_done)
			flush_HPTE(0, va, pg);
	}
	spin_unlock(&init_mm.page_table_lock);
	return err;
}
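A hedged sketch of the classic caller on this port: ioremap() establishing a region one page at a time via map_page() (names and error handling simplified):

static int ioremap_region_sketch(unsigned long pa, unsigned long va,
				 unsigned long size, int flags)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_page(va + i, pa + i, flags);

		if (err)
			return err;	/* no memory for a PTE page */
	}
	return 0;
}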
Example #12
File: ioremap.c Project: 1x23/unifi-gpl
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, prot));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
Example #13
File: acpi.c Project: NieHao/Tomato-RAF
/*
 * acpi_create_identity_pmd
 *
 * Create a new, identity mapped pmd.
 *
 * Do this by creating new page directory, and marking all the pages as R/W
 * Then set it as the new Page Middle Directory.
 * And, of course, flush the TLB so it takes effect.
 *
 * We save the address of the old one, for later restoration.
 */
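/* Note: pmd, saved_pmd and ptep are file-scope variables in the original
 * acpi.c; their declarations are not shown in this excerpt. */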
static void acpi_create_identity_pmd (void)
{
	pgd_t *pgd;
	int i;

	ptep = (pte_t*)__get_free_page(GFP_KERNEL);

	/* fill page with low mapping */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));

	pgd = pgd_offset(current->active_mm, 0);
	pmd = pmd_alloc(current->mm, pgd, 0);

	/* save the old pmd */
	saved_pmd = *pmd;

	/* set the new one */
	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));

	/* flush the TLB */
	local_flush_tlb();
}
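The comment above promises a later restoration; a hedged sketch of that counterpart (the original file has a matching restore routine, but this body is reconstructed from memory and should be treated as illustrative):

static void acpi_restore_pmd_sketch(void)
{
	/* Put the saved pmd back, flush, and drop the identity pte page. */
	set_pmd(pmd, saved_pmd);
	local_flush_tlb();
	free_page((unsigned long)ptep);
}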
Example #14
File: memory.c Project: davidbau/davej
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page *page;
		pte_t oldpage;
		oldpage = ptep_get_and_clear(pte);

		page = virt_to_page(__va(phys_addr));
		if ((!VALID_PAGE(page)) || PageReserved(page))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
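For completeness, the forget_pte() companion this function calls, as it appeared in mm/memory.c of the same era (quoted from memory; treat as illustrative):

static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
}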
Example #15
void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG "kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}
Example #16
extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | __READABLE | 
							     __WRITEABLE | _PAGE_GLOBAL |
							     _PAGE_KERNEL | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
Example #17
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	DEBUG_IOREMAP(("    %s: pte %x address %x size %x phys_addr %x\n",
		       __FUNCTION__, pte, address, size, phys_addr));

	do {
		if (!pte_none(*pte))
			printk("remap_area_pte: page already exists\n");
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT |
					_PAGE_READ | _PAGE_WRITE |
					_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
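Finally, for reference across all of the examples above, a hedged sketch of what mk_pte_phys() itself reduces to on many of these ports: folding the physical address and the protection bits into a pte_t (the exact encoding is architecture-specific; some ports shift to a pfn first):

static inline pte_t mk_pte_phys_sketch(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}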