Ejemplo n.º 1
0
/*
 * dup_region - share the mappings of [start, end) from @src into @dest
 * using copy-on-write.
 *
 * @src:   source page directory
 * @dest:  destination page directory
 * @start: first virtual address of the region (assumed page-aligned)
 * @end:   one past the last virtual address of the region
 *
 * For every mapped page the source PTE is downgraded to read-only, the
 * backing frame's refcount is raised, and the (now read-only) PTE is
 * copied into @dest, so the first write in either address space takes a
 * COW fault.
 *
 * Returns 0 on success, -ENOMEM if a page table for @dest cannot be
 * allocated.  On failure, pages already shared are left in place; the
 * caller is expected to tear down @dest.
 */
static int dup_region(pgd_t *src, pgd_t *dest,
                      viraddr_t start, viraddr_t end)
{
    for (; start < end; start += PAGE_SIZE) {

        /* Do not allocate intermediate tables in the source: a hole in
         * src simply stays a hole in dest.  (Was `0`; use `false` to
         * match the boolean `create` argument used below.) */
        pte_t *srcpte = _page_walk(src, start, false);

        /* Skip unmapped addresses: the walk can succeed and still hand
         * back an empty PTE (same check page_lookup performs) — sharing
         * it would refcount a bogus frame. */
        if (!srcpte || pte_none(*srcpte))
            continue;

        /* The destination walk must allocate missing page tables. */
        pte_t *destpte = _page_walk(dest, start, true);

        if (!destpte)
            return -ENOMEM;

        struct page *page = virt2page(pte_page_vaddr(*srcpte));

        /* Write-protect the source BEFORE publishing the shared copy. */
        pte_clearwrite(srcpte);

        /* Both address spaces now reference this frame. */
        atomic_inc(&page->_count);

        *destpte = *srcpte;
    }

    return 0;
}
Ejemplo n.º 2
0
/*
 * exit_mm - drop one reference to an address space, and tear it down
 * completely when the last reference is gone.
 *
 * @mm: the address space to release; NULL (or an mm with no page
 *      directory) is tolerated and treated as a no-op.
 *
 * Teardown order: delete all VMAs, release every user-space page-table
 * page referenced by the directory, then free the directory page itself
 * and the mm descriptor.
 *
 * Returns 0 in all cases.
 */
int exit_mm(struct mm_struct *mm)
{
	pmd_t *pmd;
	pgd_t *pgd;
	uint32_t pgdno, pmdno;
	struct page *page;

	/* Validate mm before touching any of its fields.  (The previous
	 * version read mm->mmap ahead of this test, dereferencing a
	 * possibly-NULL pointer; the unused vma/pa locals are gone.) */
	if (!mm || !mm->mm_pgd)
		return 0;

	/* Only the last reference performs the actual teardown. */
	if (!atomic_dec_and_test(&mm->mm_count))
		return 0;

	delete_all_vma(mm);

	/* Walk only the user half of the directory; kernel mappings are
	 * shared and must not be freed here. */
	for (pgdno = 0; pgdno < pgd_index(KERNEL_BASE_ADDR); pgdno++) {
		pgd = mm->mm_pgd + pgdno;
		if (!pgd_present(*pgd) || pgd_none(*pgd))
			continue;
		pmd_t *pmd_base = (pmd_t *)pgd_page_vaddr(*pgd);

		for (pmdno = 0; pmdno < PTRS_PER_PMD; pmdno++) {
			pmd = pmd_base + pmdno;
			if (!pmd_present(*pmd) || pmd_none(*pmd))
				continue;
			/* Release the page-table page this pmd points at. */
			page_decref(virt2page(pmd_page_vaddr(*pmd)));
			pmd_set(pmd, 0, 0);
		}
		/* Release the pmd page itself and clear the entry. */
		page_decref(virt2page(pgd_page_vaddr(*pgd)));
		pgd_set(pgd, 0, 0);
	}

	/* Finally free the directory page and the descriptor. */
	page = virt2page((viraddr_t)mm->mm_pgd);
	page_free(page);
	kfree(mm);

	return 0;
}
Ejemplo n.º 3
0
/*
 * page_lookup - find the struct page mapped at virtual address @va in
 * the address space described by @pgdir.
 *
 * @pgdir:     page directory to search (must not be NULL)
 * @va:        virtual address to look up
 * @pte_store: if non-NULL, receives a pointer to the matching PTE —
 *             written only when a mapping exists.
 *
 * Returns the struct page backing @va, or NULL when no page table or
 * no mapping exists for that address.  Never allocates.
 */
struct page* page_lookup(pgd_t *pgdir, viraddr_t va, pte_t **pte_store)
{
	assert(pgdir);

	/* Walk without creating: a lookup must not grow the tables. */
	pte_t *entry = _page_walk(pgdir, va, false);
	if (entry == NULL || pte_none(*entry))
		return NULL;

	if (pte_store != NULL)
		*pte_store = entry;

	return virt2page(pte_page_vaddr(*entry));
}
Ejemplo n.º 4
0
/*
 * dump_phy_page_address - log the physical frame (if any) backing every
 * page of every VMA in @task's address space.
 *
 * @task: task whose memory map is dumped; tasks without an mm or
 *        without any VMAs are silently skipped.
 *
 * One pr_info line is emitted per page: either the linear→physical
 * translation, or a "not alloc" note for pages with no frame.
 */
static void dump_phy_page_address(struct task_struct *task)
{
    struct vm_area_struct *area;
    unsigned long addr;

    /* Nothing to dump for kernel threads / empty maps. */
    if (!task->mm || !task->mm->mmap)
        return;

    for (area = task->mm->mmap; area; area = area->vm_next) {
        for (addr = area->vm_start; addr < area->vm_end; addr += PAGE_SIZE) {
            struct page *pg = virt2page(task->mm, addr);

            if (pg) {
                unsigned long pfn = page_to_pfn(pg);
                pr_info("linear address: 0x%lx, physical address: 0x%lx\n",
                        addr, (pfn<<PAGE_SHIFT));
            } else {
                pr_info("linear address: 0x%lx, physical address: not alloc\n",
                        addr);
            }
        }
    }
}