Example #1
File: page_tables.c Project: 020gzh/linux
/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the Guest
 * (and not the pages for other CPUs).
 *
 * The pages for the pagetables have all been allocated before: we just need
 * to make sure the actual PTEs are up-to-date for the CPU we're about to run
 * on.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	unsigned long base;
	struct page *percpu_switcher_page, *regs_page;
	pte_t *pte;
	struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];

	/* Switcher page should always be mapped by now! */
	BUG_ON(!pgdir->switcher_mapped);

	/* 
	 * Remember that we have two pages for each Host CPU, so we can run a
	 * Guest on each CPU without them interfering.  We need to make sure
	 * those pages are mapped correctly in the Guest, but since we usually
	 * run on the same CPU, we cache that, and only update the mappings
	 * when we move.
	 */
	if (pgdir->last_host_cpu == raw_smp_processor_id())
		return;

	/* -1 means unknown so we remove everything. */
	if (pgdir->last_host_cpu == -1) {
		unsigned int i;
		for_each_possible_cpu(i)
			remove_switcher_percpu_map(cpu, i);
	} else {
		/* We know exactly what CPU mapping to remove. */
		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
	}

	/*
	 * When we're running the Guest, we want the Guest's "regs" page to
	 * appear where the first Switcher page for this CPU is.  This is an
	 * optimization: when the Switcher saves the Guest registers, it saves
	 * them into the first page of this CPU's "struct lguest_pages": if we
	 * make sure the Guest's register page is already mapped there, we
	 * don't have to copy them out again.
	 */
	/* Find the shadow PTE for this regs page. */
	base = switcher_addr + PAGE_SIZE
		+ raw_smp_processor_id() * sizeof(struct lguest_pages);
	pte = find_spte(cpu, base, false, 0, 0);
	regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
	get_page(regs_page);
	set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));

	/*
	 * We map the second page of the struct lguest_pages read-only in
	 * the Guest: the IDT, GDT and other things it's not supposed to
	 * change.
	 */
	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
	percpu_switcher_page
		= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
	get_page(percpu_switcher_page);
	set_pte(pte, mk_pte(percpu_switcher_page,
			    __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));

	pgdir->last_host_cpu = raw_smp_processor_id();
}
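A small helper sketch (not from the project source) restating the address arithmetic above: each host CPU owns one "struct lguest_pages" (two pages, as the per-CPU cleanup in Example #6 also assumes) starting one page past the Switcher code.

static unsigned long switcher_percpu_base(unsigned int cpu_nr)
{
	/* One Switcher code page, then two pages (struct lguest_pages) per CPU. */
	return switcher_addr + PAGE_SIZE
	       + cpu_nr * sizeof(struct lguest_pages);
}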
Example #2
static inline void copy_one_pte(pte_t * old_pte, pte_t * new_pte, int cow)
{
	pte_t pte = *old_pte;
	unsigned long page_nr;

	if (pte_none(pte))
		return;
	if (!pte_present(pte)) {
		swap_duplicate(pte_val(pte));
		set_pte(new_pte, pte);
		return;
	}
	page_nr = MAP_NR(pte_page(pte));
	if (page_nr >= MAP_NR(high_memory) || PageReserved(mem_map+page_nr)) {
		set_pte(new_pte, pte);
		return;
	}
	if (cow)
		pte = pte_wrprotect(pte);
	if (delete_from_swap_cache(page_nr))
		pte = pte_mkdirty(pte);
	set_pte(new_pte, pte_mkold(pte));
	set_pte(old_pte, pte);
	mem_map[page_nr].count++;
}
Example #3
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will have removed the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check that the pfn should
		 * be mapped.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}
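As a rough illustration (not kernel code), a restore-time walker would apply _copy_pte() to every entry of one page-table page along these lines:

static void copy_pte_page_sketch(pte_t *dst_ptep, pte_t *src_ptep,
				 unsigned long addr)
{
	unsigned int i;

	/* Visit every entry of one PTE page, copying/adjusting each one. */
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		_copy_pte(dst_ptep + i, src_ptep + i, addr);
}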
Example #4
/*
 * Trying to stop swapping from a file is fraught with races, so
 * we repeat quite a bit here when we have to pause. swapoff()
 * isn't exactly timing-critical, so who cares (but this is /really/
 * inefficient, ugh).
 *
 * We return 1 after having slept, which makes the caller start over
 * from the beginning for this process.
 */
static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
	pte_t *dir, unsigned int type, unsigned long page)
{
	pte_t pte = *dir;

	if (pte_none(pte))
		return 0;
	if (pte_present(pte)) {
		unsigned long page_nr = MAP_NR(pte_page(pte));
		if (page_nr >= MAP_NR(high_memory))
			return 0;
		if (!in_swap_cache(page_nr))
			return 0;
		if (SWP_TYPE(in_swap_cache(page_nr)) != type)
			return 0;
		delete_from_swap_cache(page_nr);
		set_pte(dir, pte_mkdirty(pte));
		return 0;
	}
	if (SWP_TYPE(pte_val(pte)) != type)
		return 0;
	read_swap_page(pte_val(pte), (char *) page);
#if 0 /* Is this really needed here, hasn't it been solved elsewhere? */
	flush_page_to_ram(page);
#endif
	if (pte_val(*dir) != pte_val(pte)) {
		free_page(page);
		return 1;
	}
	set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
	flush_tlb_page(vma, address);
	++vma->vm_mm->rss;
	swap_free(pte_val(pte));
	return 1;
}
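A hedged sketch (not from the source) of the restart protocol described in the comment above: a return of 1 means unuse_pte() slept, so the caller walks the VMA again from the start. The page-table walk uses the helpers of kernels from that era and is illustrative only.

static int unuse_vma_sketch(struct vm_area_struct *vma, unsigned int type,
			    unsigned long page)
{
	unsigned long addr;

restart:
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, addr);
		if (pmd_none(*pmd))
			continue;
		pte = pte_offset(pmd, addr);
		/* unuse_pte() returns 1 after sleeping: start the walk over. */
		if (unuse_pte(vma, addr, pte, type, page))
			goto restart;
	}
	return 0;
}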
Example #5
/*H:520
 * Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1.
 */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	pte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		set_pte(&pte[i], mk_pte(switcher_page[i],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));

	/*
	 * The second page contains the "struct lguest_ro_state", and is
	 * read-only.
	 */
	set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
Example #6
File: page_tables.c Project: 020gzh/linux
/*H:481
 * This clears the Switcher mappings for cpu #i.
 */
static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
{
	unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
	pte_t *pte;

	/* Clear the mappings for both pages. */
	pte = find_spte(cpu, base, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));

	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));
}
Example #7
/**
 * Replace the PFN of a PTE with the address of the actual page.
 *
 * The caller maps a reserved dummy page at the address with the desired access
 * and flags.
 *
 * This hack is required for older Linux kernels which don't provide
 * remap_pfn_range().
 *
 * @returns 0 on success, -ENOMEM on failure.
 * @param   mm          The memory context.
 * @param   ulAddr      The mapping address.
 * @param   Phys        The physical address of the page to map.
 */
static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
{
    int rc = -ENOMEM;
    pgd_t *pgd;

    spin_lock(&mm->page_table_lock);

    pgd = pgd_offset(mm, ulAddr);
    if (!pgd_none(*pgd) && !pgd_bad(*pgd))
    {
        pmd_t *pmd = pmd_offset(pgd, ulAddr);
        if (!pmd_none(*pmd))
        {
            pte_t *ptep = pte_offset_map(pmd, ulAddr);
            if (ptep)
            {
                pte_t pte = *ptep;
                pte.pte_high &= 0xfff00000;
                pte.pte_high |= ((Phys >> 32) & 0x000fffff);
                pte.pte_low  &= 0x00000fff;
                pte.pte_low  |= (Phys & 0xfffff000);
                set_pte(ptep, pte);
                pte_unmap(ptep);
                rc = 0;
            }
        }
    }

    spin_unlock(&mm->page_table_lock);
    return rc;
}
Example #8
/*
 * Note that this is intended to be called only from the copy_user_page
 * asm code; anything else will require special locking to prevent the
 * mini-cache space being re-used.  (Note: probably preempt unsafe).
 *
 * We rely on the fact that the minicache is 2K, and we'll be pushing
 * 4K of data through it, so we don't actually have to specifically
 * flush the minicache when we change the mapping.
 *
 * Note also: assert(PAGE_OFFSET <= virt < high_memory).
 * Unsafe: preempt, kmap.
 */
unsigned long map_page_minicache(unsigned long virt)
{
	set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
	cpu_tlb_invalidate_page(minicache_address, 0);

	return minicache_address;
}
Example #9
static inline void
remove_mapping_pte_range (pmd_t *pmd, unsigned long address, unsigned long size)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none (*pmd))
		return;
	if (pmd_bad (*pmd)){
		printk ("remove_graphics_pte_range: bad pmd (%08lx)\n", pmd_val (*pmd));
		pmd_clear (pmd);
		return;
	}
	pte = pte_offset (pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry = *pte;
		if (pte_present (entry))
			set_pte (pte, pte_modify (entry, PAGE_NONE));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
Example #10
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW | 
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
Example #11
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	pud_t *pud;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	pud = pud_offset(pgd, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
	} else {
		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

		if (pmd_large(*pmd)) {
			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		} else {
			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
		}
	}
	__flush_tlb_all();

	return 0;
}
Example #12
File: page_tables.c Project: 020gzh/linux
/*H:501
 * We do need the Switcher code mapped at all times, so we allocate that
 * part of the Guest page table here.  We map the Switcher code immediately,
 * but defer mapping of the guest register page and IDT/LDT etc page until
 * just before we run the guest in map_switcher_in_guest().
 *
 * We *could* do this setup in map_switcher_in_guest(), but at that point
 * we have interrupts disabled, and allocating pages like that is fraught: we
 * can't sleep if we need to free up some memory.
 */
static bool allocate_switcher_mapping(struct lg_cpu *cpu)
{
	int i;

	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
				       CHECK_GPGD_MASK, _PAGE_TABLE);
		if (!pte)
			return false;

		/*
		 * Map the switcher page if not already there.  It might
		 * already be there because we call allocate_switcher_mapping()
		 * in guest_set_pgd() just in case it did discard our Switcher
		 * mapping, but it probably didn't.
		 */
		if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
			/* Get a reference to the Switcher page. */
			get_page(lg_switcher_pages[0]);
			/* Create a read-only, executable, kernel-style PTE */
			set_pte(pte,
				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
		}
	}
	cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
	return true;
}
Example #13
/**
 * Set up a temporary kernel mapping.
 * "type" and the CPU together determine which fixed-mapping linear
 * address is used to map the requested page.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	/**
	 * If the page being mapped is not in high memory, no mapping is
	 * needed; just return its linear address directly.
	 */
	if (!PageHighMem(page))
		return page_address(page);

	/**
	 * Work out the fixed-map linear address from "type" and the CPU.
	 */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	/**
	 * Point the page-table entry for this linear address at the page.
	 */
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	/**
	 * Finally, flush the TLB entry for this address before returning
	 * the linear address.
	 */
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}
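A hedged usage sketch (not part of this example's source) of the usual pairing with kunmap_atomic() under the old km_type API: map, touch the page, unmap, with no sleeping in between.

static void copy_into_page(struct page *page, const void *buf, size_t len)
{
	/* Map the (possibly highmem) page into this CPU's fixmap slot. */
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(vaddr, buf, len);

	/* Release the slot; sleeping between map and unmap is not allowed. */
	kunmap_atomic(vaddr, KM_USER0);
}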
Example #14
File: bus_subr.c Project: ryo/netbsd-src
/*
 * Create a temporary, one-page mapping for a device.
 * This is used by some device probe routines that
 * need to do peek/write/read tricks.
 */
void *
bus_tmapin(int bustype, int pa)
{
	vaddr_t pgva;
	int off, pte;

	if ((bustype < 0) || (bustype >= BUS__NTYPES))
		panic("bus_tmapin: bustype");

	off = pa & PGOFSET;
	pa -= off;

	pa &= bus_info[bustype].mask;
	pa |= bus_info[bustype].base;

	pte = PA_PGNUM(pa);
	pte |= (bus_info[bustype].type << PG_MOD_SHIFT);
	pte |= (PG_VALID | PG_WRITE | PG_SYSTEM | PG_NC);

	if (tmp_vpages_inuse)
		panic("bus_tmapin: tmp_vpages_inuse");
	tmp_vpages_inuse++;

	pgva = tmp_vpages[1];
	set_pte(pgva, pte);

	return ((void *)(pgva + off));
}
Example #15
static inline void 
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end, pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
				   _PAGE_ACCESSED | flags);

	address &= ~PMD_MASK;

	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	BUG_ON(address >= end);

	pfn = phys_addr >> PAGE_SHIFT;
	do {
		BUG_ON(!pte_none(*pte));

		set_pte(pte, pfn_pte(pfn, pgprot));

		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
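For context, a hedged sketch (not the actual kernel code) of the PMD-level caller such a remap_area_pte() typically sits under: it allocates PTE pages and hands each one its share of the range.

static inline int remap_area_pmd_sketch(pmd_t *pmd, unsigned long address,
					unsigned long size,
					unsigned long phys_addr,
					unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);

	do {
		/*
		 * pte_alloc_kernel()'s signature has varied across kernel
		 * versions; the (pmd, address) form is assumed here.
		 */
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}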
Example #16
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address, 
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT | 
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED | 
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
Example #17
/*
 * Map the 32bit vsyscall page on demand.
 *
 * RED-PEN: This knows too much about high level VM.
 *
 * Alternative would be to generate a vma with appropriate backing options
 * and let it be handled by generic VM.
 */
int __map_syscall32(struct mm_struct *mm, unsigned long address)
{ 
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte;
	pmd_t *pmd;
	int err = -ENOMEM;

	spin_lock(&mm->page_table_lock); 
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, address);
 		if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) {
 			if (pte_none(*pte)) {
 				set_pte(pte,
 					mk_pte(virt_to_page(syscall32_page),
 					       PAGE_KERNEL_VSYSCALL32));
 			}
 			/* Flush only the local CPU. Other CPUs taking a fault
 			   will just end up here again. This is probably not
			   needed and is just paranoia. */
 			__flush_tlb_one(address);
 			err = 0;
		}
	}
	spin_unlock(&mm->page_table_lock);
	return err;
}
Example #18
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte(TOP_PTE(vaddr), mk_pte(page, kmap_prot));
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
Example #19
File: pg-sh4.c Project: ivucica/linux
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}
Example #20
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		set_pte(pte + pte_index(restore_jump_address),
		__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
Example #21
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
Example #22
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
Example #23
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table(GFP_KERNEL);
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset_k(0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * Most of the page table entries are zeroed
	 * when the table is created.
	 */
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}
Example #24
void unprotect_page(void *vaddr, unsigned long prot)
{
	pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
	pteval_t n_pte = *p_pte & ~prot;

	set_pte(table_root, n_pte, vaddr);
}
Example #25
/*
 * Maps a range of vmalloc()ed memory into the requested pages. The old
 * mappings are removed.
 */
static inline void
vmap_pte_range (pte_t *pte, unsigned long address, unsigned long size, unsigned long vaddr)
{
	unsigned long end;
	pgd_t *vdir;
	pmd_t *vpmd;
	pte_t *vpte;
	
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		struct page * page;
		pte_clear(pte);

		vdir = pgd_offset_k (vaddr);
		vpmd = pmd_offset (vdir, vaddr);
		vpte = pte_offset (vpmd, vaddr);
		page = pte_page (*vpte);

		set_pte(pte, mk_pte(page, PAGE_USERIO));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		vaddr += PAGE_SIZE;
		pte++;
	} while (address < end);
}
Example #26
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * This appears conservative since it is only called
	 * from __set_fixmap.
	 */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}
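A hedged sketch (not necessarily this architecture's exact code) of the __set_fixmap()-style caller mentioned in the comment, which is typically the only user of set_pte_pfn():

static void set_fixmap_sketch(enum fixed_addresses idx, unsigned long phys,
			      pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	/* Refuse indices outside the fixmap area. */
	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}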
Example #27
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
Example #28
File: mmu.c Project: 1314cc/linux
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}
Example #29
/*
 * copy_user_page
 * @to: kernel logical address
 * @from: kernel logical address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	extern void __copy_page_wb(void *to, void *from);

	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		__copy_page_wb(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = virt_to_phys(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		inc_preempt_count();
		BUG_ON(atomic_inc_return(&concurreny_check[(address & CACHE_ALIAS)>>12]) != 1);
		set_pte(pte, entry);
		local_irq_save(flags);
		flush_tlb_one(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		atomic_dec(&concurreny_check[(address & CACHE_ALIAS)>>12]);
		dec_preempt_count();
	}
}
Example #30
static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
{
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);

	/* Use upper 10 bits of VA to index the first level map */
	pge = pgd_offset_k(va);
	pue = pud_offset(pge, va);
	pme = pmd_offset(pue, va);

	/* Use middle 10 bits of VA to index the second-level map */
	pte = pte_alloc_kernel(&init_mm, pme, va);
	if (pte != 0) {
		err = 0;
		set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
	}

	spin_unlock(&init_mm.page_table_lock);
	return err;
}