Example #1
static void 
do_page_fault(struct frame *tf)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = curtask->mm;
	viraddr_t address = rcr2();
	
	if (address >= KERNEL_BASE_ADDR) {
		//
	}
	
	vma = find_vma(mm, address);

	if (!vma || vma->vm_start > address) {
		printk("task [%08d] access invalid vma:%x, exiting\n",curtask->pid,address);
		do_exit(curtask);
	} else {
		pte_t *pte = _page_walk(task2pgd(curtask), address, true);
		if (!pte) {
			printk("!pte do_page_fault!!!!\n");
			do_exit(curtask);
		}
		handle_pte_fault(mm, vma, address, pte, tf->tf_trapno);
	}
}
Example #2
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	int ret = -1;
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(mm, address);
	pmd = pmd_alloc(pgd, address);

	if (pmd) {
		pte_t * pte = pte_alloc(pmd, address);
		if (pte)
			ret = handle_pte_fault(mm, vma, address, write_access, pte);
	}
	return ret;
}
Example #3
void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access)
{
    pgd_t * pgd;
    pmd_t * pmd;
    pte_t * pte;

    pgd = pgd_offset(vma->vm_task, address);
    pmd = pmd_alloc(pgd, address);

    if (!pmd)
        goto no_memory;
    pte = pte_alloc(pmd, address);
    if (!pte)
        goto no_memory;
    handle_pte_fault(vma, address, pte, write_access);
    return;
no_memory:
    oom();
}
Example #4
void handle_mm_fault(struct vm_area_struct * vma, unsigned long address,
	int write_access)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(vma->vm_mm, address);
	pmd = pmd_alloc(pgd, address);
	if (!pmd)
		goto no_memory;
	pte = pte_alloc(pmd, address);
	if (!pte)
		goto no_memory;
	handle_pte_fault(vma, address, write_access, pte);
	update_mmu_cache(vma, address, *pte);
	return;
no_memory:
	oom(current);
}
Example #5
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	pgd_t *pgd;
	pmd_t *pmd;

	current->state = TASK_RUNNING;
	pgd = pgd_offset(mm, address);

	/*
	 * We need the page table lock to synchronize with kswapd
	 * and the SMP-safe atomic PTE updates.
	 */
	spin_lock(&mm->page_table_lock);
	pmd = pmd_alloc(mm, pgd, address);

	if (pmd) {
		pte_t * pte = pte_alloc(mm, pmd, address);
		if (pte)
			/* handle_pte_fault() drops mm->page_table_lock itself. */
			return handle_pte_fault(mm, vma, address, write_access, pte);
	}
	spin_unlock(&mm->page_table_lock);
	return -1;
}