Example #1
static int copy_one_pte(struct vm_area_struct *vma, pte_t * src, pte_t * dst,
			unsigned long old_addr, unsigned long new_addr,
			struct pte_chain ** pte_chainp)
{
	int error = 0;
	pte_t pte;
	struct page * page = NULL;

	if (pte_present(*src))
		page = pte_page(*src);

	if (!pte_none(*src)) {
		/* Drop the reverse mapping at the old location before clearing the pte. */
		if (page)
			page_remove_rmap(page, src);
		pte = vm_ptep_get_and_clear(vma, old_addr, src);
		if (!dst) {
			/* No dest?  Put it back at the old location. */
			dst = src;
			new_addr = old_addr;
			error++;
		}
		vm_set_pte(vma, new_addr, dst, pte);
		if (page)
			*pte_chainp = page_add_rmap(page, dst, *pte_chainp);
	}
	return error;
}
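The vm_-prefixed helpers in this example carry the vma and the virtual address alongside the pte pointer, in the style of the classic set_pte()/ptep_get_and_clear() primitives. A minimal sketch of what such wrappers could look like, assuming they are thin pass-throughs; this is an assumption about this tree, not its actual implementation (the real helpers may also flush TLBs or update per-vma accounting):

/* Hypothetical wrappers -- sketch only, built on the 2.5-era set_pte() and
 * ptep_get_and_clear() primitives; not the actual code behind
 * vm_set_pte()/vm_ptep_get_and_clear() in this tree. */
static inline void vm_set_pte(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);			/* install the new entry */
}

static inline pte_t vm_ptep_get_and_clear(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(ptep);	/* atomically read and clear it */
}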
Example #2
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static void
unuse_pte(struct vm_area_struct * vma, unsigned long address, pte_t * dir,
       swp_entry_t entry, struct page * page, struct pte_chain ** pte_chainp)
{
	pte_t pte = *dir;

	/* Only proceed if this pte still holds exactly the swap entry being unused. */
	if (likely(pte_to_swp_entry(pte).val != entry.val))
		return;
	if (unlikely(pte_none(pte) || pte_present(pte)))
		return;
	get_page(page);
	/* Replace the swap entry with a present (but old-marked) pte, add the
	 * reverse mapping, release the swap slot, and account the resident page. */
	vm_set_pte(vma, address, dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
	*pte_chainp = page_add_rmap(page, dir, *pte_chainp);
	swap_free(entry);
	++vma->vm_mm->rss;
}
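The pte_chain out-parameter follows the 2.5-era reverse-mapping convention: the caller pre-allocates a chain node, passes its address down while holding the locks named in the comment above, and frees whatever was not consumed. A minimal sketch of that calling pattern, assuming the pte_chain_alloc()/pte_chain_free() helpers from those rmap patches; the real swapoff path walks every pmd and pte of the vma rather than a single address:

/* Hypothetical caller -- sketch only; the surrounding walk and locking are
 * simplified (mmlist_lock is also held in the real path). */
struct pte_chain *pte_chain = pte_chain_alloc(GFP_KERNEL);

spin_lock(&vma->vm_mm->page_table_lock);
unuse_pte(vma, address, dir, entry, page, &pte_chain);	/* may consume the chain */
spin_unlock(&vma->vm_mm->page_table_lock);

if (pte_chain)
	pte_chain_free(pte_chain);	/* free the node if it was not consumed */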
Example #3
void vm_init_paging(struct multiboot_info *boot_info)
{
	struct x86_cpuid_info cpuid_info;
	struct frame *pgdir_frame;
	struct frame *pgtab_frame;
	pte_t *pgtab;
	ulong_t paddr, mem_max;

	/*
	 * Use the CPUID instruction to check whether large pages
	 * (the PSE feature) are supported.
	 */
	PANIC_IF(!x86_cpuid(&cpuid_info), "GeekOS requires a Pentium-class CPU");
	PANIC_IF(!cpuid_info.feature_info_edx.pse, "Processor does not support PSE");
	cons_printf("CPU supports PSE\n");

	/*
	 * Enable PSE by setting the PSE bit in CR4.
	 */
	x86_set_cr4(x86_get_cr4() | CR4_PSE);

	/*
	 * Allocate kernel page directory.
	 */
	pgdir_frame = mem_alloc_frame(FRAME_KERN, 1);
	s_kernel_pagedir = mem_frame_to_pa(pgdir_frame);
	memset(s_kernel_pagedir, '\0', PAGE_SIZE);

	/*
	 * We will support at most 2G of physical memory.
	 */
	mem_max = ((ulong_t) boot_info->mem_upper) * 1024;
	if (mem_max > (1UL << 31)) {
		mem_max = (ulong_t) (1UL << 31);
	}

	/*
	 * We need a page table for the low 4M of the kernel address space,
	 * since we want to leave the zero page unmapped (to catch null pointer derefs).
	 */
	pgtab_frame = mem_alloc_frame(FRAME_KERN, 1);
	pgtab = mem_frame_to_pa(pgtab_frame);
	memset(pgtab, '\0', PAGE_SIZE);

	/*
	 * Initialize low page table, leaving page 0 unmapped
	 */
	for (paddr = PAGE_SIZE; paddr < VM_PT_SPAN; paddr += PAGE_SIZE) {
		vm_set_pte(pgtab, VM_WRITE|VM_READ|VM_EXEC, paddr, paddr);
	}

	/*
	 * Add low page table to the kernel pagedir.
	 */
	vm_set_pde(s_kernel_pagedir, VM_WRITE|VM_READ|VM_EXEC, 0UL, (u32_t) pgtab);

	/*
	 * Use 4M pages to map the rest of the low 2G of memory
	 */
	for (paddr = VM_PT_SPAN; paddr < mem_max; paddr += VM_PT_SPAN) {
		vm_set_pde_4m(s_kernel_pagedir, VM_WRITE|VM_READ|VM_EXEC, paddr, paddr);
	}

	/*
	 * Turn on paging!
	 */
	x86_set_cr3((u32_t) s_kernel_pagedir); /* set the kernel page directory */
	x86_set_cr0(x86_get_cr0() | CR0_PG);   /* turn on the paging bit in cr0 */

	cons_printf("Paging enabled\n");
}
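For the GeekOS-style call signature used above, vm_set_pte(table, flags, vaddr, paddr) presumably picks the page-table slot from the virtual address and stores the frame address together with permission bits. A minimal sketch of such a helper, named hyp_set_pte here to make clear it is an assumption and not the project's actual implementation; it assumes a flat 32-bit entry and the standard x86 present/write bits, whereas GeekOS defines pte_t as a bitfield and may encode VM_READ/VM_EXEC differently:

/* Hypothetical stand-in for vm_set_pte() -- sketch only. */
#define HYP_PTE_PRESENT	0x001u	/* x86 P bit */
#define HYP_PTE_WRITE	0x002u	/* x86 R/W bit */

static void hyp_set_pte(pte_t *pgtab, unsigned flags, ulong_t vaddr, ulong_t paddr)
{
	u32_t entry = (u32_t) (paddr & ~0xFFFul) | HYP_PTE_PRESENT;

	if (flags & VM_WRITE)
		entry |= HYP_PTE_WRITE;

	/* Each 4K page table has 1024 entries; bits 21..12 of the vaddr select one. */
	((u32_t *) pgtab)[(vaddr >> 12) & 0x3FF] = entry;
}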