Example #1
void pgd_free(pgd_t *pgd)
{
	int i;

	pgd_test_and_unpin(pgd);

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1) {
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
			kmem_cache_free(pmd_cache, pmd);
		}
		if (!HAVE_SHARED_KERNEL_PMD) {
			unsigned long flags;
			spin_lock_irqsave(&pgd_lock, flags);
			pgd_list_del(pgd);
			spin_unlock_irqrestore(&pgd_lock, flags);
			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
				make_lowmem_page_writable(
					pmd, XENFEAT_writable_page_tables);
				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
				kmem_cache_free(pmd_cache, pmd);
			}
		}
	}
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}
Example #2
/*
 * This function zeroes out partially mmap'ed pages at truncation time.
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		printk("bad page table directory entry %p:[%lx]\n", page_dir, pgd_val(*page_dir));
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		printk("bad page table directory entry %p:[%lx]\n", page_dir, pgd_val(*page_dir));
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	address &= ~PAGE_MASK;
	address += pte_page(pte);
	if (address >= high_memory)
		return;
	memset((void *) address, 0, PAGE_SIZE - (address & ~PAGE_MASK));
	flush_page_to_ram(pte_page(pte));
}
Example #3
File: pgtable.c Project: mobilipia/iods
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
Example #4
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	pgd_test_and_unpin(pgd);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}

	if (!HAVE_SHARED_KERNEL_PMD) {
		unsigned long flags;

		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
			if (!pmd)
				goto out_oom;
			set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
		}

		spin_lock_irqsave(&pgd_lock, flags);
		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
			pgd_t *kpgd = pgd_offset_k(v);
			pud_t *kpud = pud_offset(kpgd, v);
			pmd_t *kpmd = pmd_offset(kpud, v);
			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
			memcpy(pmd, kpmd, PAGE_SIZE);
			make_lowmem_page_readonly(
				pmd, XENFEAT_writable_page_tables);
		}
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
Example #5
static inline int copy_pmd_range(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long address, unsigned long size, int cow)
{
	pmd_t * src_pmd, * dst_pmd;
	unsigned long end;
	int error = 0;

	if (pgd_none(*src_pgd))
		return 0;
	if (pgd_bad(*src_pgd)) {
		printk("copy_pmd_range: bad pgd (%08lx)\n", pgd_val(*src_pgd));
		pgd_clear(src_pgd);
		return 0;
	}
	src_pmd = pmd_offset(src_pgd, address);
	if (pgd_none(*dst_pgd)) {
		if (!pmd_alloc(dst_pgd, 0))
			return -ENOMEM;
	}
	dst_pmd = pmd_offset(dst_pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		error = copy_pte_range(dst_pmd++, src_pmd++, address, end - address, cow);
		if (error)
			break;
		address = (address + PMD_SIZE) & PMD_MASK; 
	} while (address < end);
	return error;
}
Example #6
File: pgtable.c Project: wesen/lemonix
pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
        if (!pmd)
            goto out_oom;
        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }
    return pgd;

out_oom:
    for (i--; i >= 0; i--) {
        pgd_t pgdent = pgd[i];
        void* pmd = (void *)__va(pgd_val(pgdent)-1);
        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        kmem_cache_free(pmd_cache, pmd);
    }
    kmem_cache_free(pgd_cache, pgd);
    return NULL;
}
Example #7
pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = pmd_cache_alloc(i);

        if (!pmd)
            goto out_oom;

        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }
    return pgd;

out_oom:
    for (i--; i >= 0; i--) {
        pgd_t pgdent = pgd[i];
        void* pmd = (void *)__va(pgd_val(pgdent)-1);
        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        pmd_cache_free(pmd, i);
    }
    quicklist_free(0, pgd_dtor, pgd);
    return NULL;
}
Example #8
static void walk_pgd_level(struct seq_file *m)
{
#ifdef CONFIG_X86_64
	pgd_t *start = (pgd_t *) &init_level4_pgt;
#else
	pgd_t *start = swapper_pg_dir;
#endif
	int i;
	struct pg_state st;

	memset(&st, 0, sizeof(st));

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start)) {
			pgprotval_t prot = pgd_val(*start) & PTE_FLAGS_MASK;

			if (pgd_large(*start) || !pgd_present(*start))
				note_page(m, &st, __pgprot(prot), 1);
			else
				walk_pud_level(m, &st, *start,
					       i * PGD_LEVEL_MULT);
		} else
			note_page(m, &st, __pgprot(0), 1);

		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0);
}
Example #9
File: ptrace.c Project: andreiw/mkunity
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 */
static unsigned long get_long(struct vm_area_struct * vma, unsigned long addr)
{
	pgd_t * pgdir;
	pte_t * pgtable;
	unsigned long page;

repeat:
	pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr);
	if (pgd_none(*pgdir)) {
		do_no_page(vma, addr, 0);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return 0;
	}
	pgtable = (pte_t *) (PAGE_PTR(addr) + pgd_page(*pgdir));
	if (!pte_present(*pgtable)) {
		do_no_page(vma, addr, 0);
		goto repeat;
	}
	page = pte_page(*pgtable);
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page >= high_memory)
		return 0;
	page += addr & ~PAGE_MASK;
	return *(unsigned long *) page;
}
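The NOTE above leaves the page-boundary and task-area checks to the caller. A rough caller-side sketch (hypothetical, not from the original ptrace.c; it only reuses TASK_SIZE, PAGE_MASK, and the get_long() signature shown above) could perform those checks like this:

static int get_long_checked(struct vm_area_struct *vma, unsigned long addr,
			    unsigned long *result)
{
	/* The long must not straddle a page boundary ... */
	if ((addr & ~PAGE_MASK) > PAGE_SIZE - sizeof(long))
		return -EIO;
	/* ... and must lie entirely inside the task area. */
	if (addr > TASK_SIZE - sizeof(long))
		return -EIO;
	*result = get_long(vma, addr);
	return 0;
}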
Example #10
static unsigned long get_phys_addr(struct task_struct * p, unsigned long ptr)
{
    pgd_t *page_dir;
    pmd_t *page_middle;
    pte_t pte;

    if (!p || !p->mm || ptr >= TASK_SIZE)
        return 0;
    page_dir = pgd_offset(p->mm,ptr);
    if (pgd_none(*page_dir))
        return 0;
    if (pgd_bad(*page_dir)) {
        printk("bad page directory entry %08lx\n", pgd_val(*page_dir));
        pgd_clear(page_dir);
        return 0;
    }
    page_middle = pmd_offset(page_dir,ptr);
    if (pmd_none(*page_middle))
        return 0;
    if (pmd_bad(*page_middle)) {
        printk("bad page middle entry %08lx\n", pmd_val(*page_middle));
        pmd_clear(page_middle);
        return 0;
    }
    pte = *pte_offset(page_middle,ptr);
    if (!pte_present(pte))
        return 0;
    return pte_page(pte) + (ptr & ~PAGE_MASK);
}
Example #11
static inline int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
	unsigned long address, unsigned long size,
	unsigned int type, unsigned long page)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		printk("unuse_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		if (unuse_pmd(vma, pmd, address, end - address, offset, type, page))
			return 1;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
Example #12
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}
Example #13
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;

    if (!mm)
        mm = &init_mm;

    pr_alert("pgd = %p\n", mm->pgd);
    pgd = pgd_offset(mm, addr);
    pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

    do {
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
            break;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
            break;

        pmd = pmd_offset(pud, addr);
        printk(", *pmd=%016llx", pmd_val(*pmd));
        if (pmd_none(*pmd) || pmd_bad(*pmd))
            break;

        pte = pte_offset_map(pmd, addr);
        printk(", *pte=%016llx", pte_val(*pte));
        pte_unmap(pte);
    } while(0);

    printk("\n");
}
Example #14
static inline void unswap_pgd(struct vm_area_struct * vma, pgd_t *dir,
			      unsigned long address, unsigned long size,
			      unsigned long entry, unsigned long page
			      /* , int isswap */)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		printk("unswap_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unswap_pmd(vma, pmd, address, end - address, offset, entry,
			   page /* , isswap */);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
Example #15
static inline void
remove_mapping_pmd_range (pgd_t *pgd, unsigned long address, unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none (*pgd))
		return;

	if (pgd_bad (*pgd)){
		printk ("remove_graphics_pmd_range: bad pgd (%08lx)\n", pgd_val (*pgd));
		pgd_clear (pgd);
		return;
	}
	pmd = pmd_offset (pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		remove_mapping_pte_range (pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
Example #16
static void walk_pgd(struct r2k_map *k_map)
{
	pgd_t *pgd;
	struct pg_state st;
	unsigned long addr;
	unsigned i;

	memset(&st, 0, sizeof(st));
	st.marker = address_markers;
	st.k_map = k_map;

	pgd = get_global_pgd();

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
#ifdef CONFIG_ARM64
		addr = VA_START + i * PGDIR_SIZE;
#else
		addr = i * PGDIR_SIZE;
#endif
		if (!pgd_none(*pgd)) {
			walk_pud(&st, pgd, addr);
		} else {
			note_page(&st, addr, 1, pgd_val(*pgd));
		}
	}

	if (!ro)
		note_page(&st, 0, 0, 0);
}
Example #17
File: pgtbl.c Project: Neymello/Composite
/*
 * Verify that the given address is present in the page table.  Return
 * 0 if present, 1 if not.  *This checks the pgd entry, not the pte.*
 */
int 
chal_pgtbl_entry_absent(paddr_t pt, unsigned long addr)
{
	pgd_t *pgd = ((pgd_t *)chal_pa2va((void*)pt)) + pgd_index(addr);

	return !((pgd_val(*pgd)) & _PAGE_PRESENT);
}
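A hypothetical caller sketch (the wrapper name and early-return policy are assumptions; only chal_pgtbl_entry_absent() itself comes from the example above): the helper makes it cheap to skip work when the pgd slot backing an address was never populated.

/* Hypothetical: refuse to operate on an address whose pgd entry is absent. */
static int
chal_addr_backed(paddr_t pt, unsigned long addr)
{
	if (chal_pgtbl_entry_absent(pt, addr)) {
		printk("cos: no pgd entry for addr %lx\n", addr);
		return 0;
	}
	return 1;
}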
Example #18
File: pgd.c Project: HuxyUK/xpenology-3.x
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
#elif defined(CONFIG_SYNO_COMCERTO)
	free_pages((unsigned long) pgd_base, get_order(16384));
#else
	free_pages((unsigned long) pgd_base, 2);
#endif
}
Example #19
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long size;


#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir,  0, sizeof (kernel_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c..  hope it works for us */
	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	free_area_init(zones_size);

}
Example #20
unsigned long virtaddr_to_physaddr(struct mm_struct *mm, unsigned long vaddr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned long paddr = 0;

    pgd = pgd_offset(mm, vaddr);
    printk("pgd_val = 0x%lx\n", pgd_val(*pgd));
    printk("pgd_index = %lu\n", pgd_index(vaddr));
    if (pgd_none(*pgd)) {
        printk("not mapped in pgd\n");
        return INVALID_ADDR;
    }

    pud = pud_offset(pgd, vaddr);
    printk("pud_val = 0x%lx\n", pud_val(*pud));
    printk("pud_index = %lu\n", pud_index(vaddr));
    if (pud_none(*pud)) {
        printk("not mapped in pud\n");
        return INVALID_ADDR;
    }

    pmd = pmd_offset(pud, vaddr);
    printk("pmd_val = 0x%lx\n", pmd_val(*pmd));
    printk("pmd_index = %lx\n", pmd_index(vaddr));
    if(pmd_none(*pmd)){
        printk("not mapped in pmd\n");
        return INVALID_ADDR;
    }
    /* If pmd_large() is true, this pmd entry maps a huge page and is the
     * last level of the walk. */
    if(pmd_large(*pmd)){
        /* Keep the PMD-aligned base and add the offset within the huge page. */
        paddr = (pmd_val(*pmd) & PMD_MASK);
        paddr = paddr | (vaddr & ~PMD_MASK);
        return paddr;
    }
    /* Otherwise walk the fourth-level page table; PAGE_MASK
     * (0xfffffffffffff000) masks off the low offset bits [11:0]. */
    else{
        /* XXX: Need to implement */
    	pte = pte_offset_kernel(pmd, vaddr);
    	printk("pte_val = 0x%lx\n", pte_val(*pte));
    	printk("pte_index = %lx\n", pte_index(vaddr));
    	if(pte_none(*pte)){
    	    printk("not mapped in pte\n");
    	    return INVALID_ADDR;
    	}
        paddr = (pte_val(*pte) & PAGE_MASK);
        paddr = paddr | (vaddr & ~PAGE_MASK);
    	printk("paddr = %lx\n", paddr);
    	printk("__pa = %lx\n", __pa(vaddr));   /* magic macro in the kernel */
        /* End of implement */
        return paddr;
    }

}
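A short usage sketch (hypothetical; it assumes the function and its INVALID_ADDR sentinel are available in the same module) translating an address in the current task's address space:

/* Hypothetical caller: resolve a user virtual address of the current task. */
static void demo_translate(unsigned long vaddr)
{
    unsigned long paddr = virtaddr_to_physaddr(current->mm, vaddr);

    if (paddr == INVALID_ADDR)
        printk("0x%lx is not mapped\n", vaddr);
    else
        printk("0x%lx -> physical 0x%lx\n", vaddr, paddr);
}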
Example #21
static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;
	char * page;
	struct task_struct * tsk;
	unsigned long addr;
	char *tmp;
	int i;

	if (count < 0)
		return -EINVAL;
	addr = file->f_pos;
	tsk = get_task(inode->i_ino >> 16);
	if (!tsk)
		return -ESRCH;
	tmp = buf;
	while (count > 0) {
		if (current->signal & ~current->blocked)
			break;
		page_dir = pgd_offset(tsk,addr);
		if (pgd_none(*page_dir))
			break;
		if (pgd_bad(*page_dir)) {
			printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
			pgd_clear(page_dir);
			break;
		}
		page_middle = pmd_offset(page_dir,addr);
		if (pmd_none(*page_middle))
			break;
		if (pmd_bad(*page_middle)) {
			printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
			pmd_clear(page_middle);
			break;
		}
		pte = *pte_offset(page_middle,addr);
		if (!pte_present(pte))
			break;
		if (!pte_write(pte))
			break;
		page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
		i = PAGE_SIZE-(addr & ~PAGE_MASK);
		if (i > count)
			i = count;
		memcpy_fromfs(page, tmp, i);
		addr += i;
		tmp += i;
		count -= i;
	}
	file->f_pos = addr;
	if (tmp != buf)
		return tmp-buf;
	if (current->signal & ~current->blocked)
		return -ERESTARTSYS;
	return 0;
}
Example #22
File: pgd.c Project: 0x7f454c46/linux
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
}
Example #23
File: pgtbl.c Project: Neymello/Composite
/* Copy pages non-empty in from, and empty in to */
void 
copy_pgtbl_range_nonzero(paddr_t pt_to, paddr_t pt_from, 
			 unsigned long lower_addr, unsigned long size)
{
	pgd_t *tpgd = ((pgd_t *)chal_pa2va((void*)pt_to)) + pgd_index(lower_addr);
	pgd_t *fpgd = ((pgd_t *)chal_pa2va((void*)pt_from)) + pgd_index(lower_addr);
	unsigned int span = hpage_index(size);
	int i;

	printk("Copying from %p:%d to %p.\n", fpgd, span, tpgd);

	/* sizeof(pgd entry) is intended */
	for (i = 0 ; i < span ; i++) {
		if (!(pgd_val(tpgd[i]) & _PAGE_PRESENT)) {
			if (pgd_val(fpgd[i]) & _PAGE_PRESENT) printk("\tcopying vaddr %lx.\n", lower_addr + i * HPAGE_SHIFT);
			memcpy(&tpgd[i], &fpgd[i], sizeof(pgd_t));
		}
	}
}
Example #24
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 */
static unsigned long get_long(struct task_struct * tsk, 
	struct vm_area_struct * vma, unsigned long addr)
{
	pgd_t * pgdir;
	pmd_t * pgmiddle;
	pte_t * pgtable;
	unsigned long page;
	int fault;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (pgd_none(*pgdir)) {
		fault = handle_mm_fault(tsk, vma, addr, 0);
		if (fault > 0)
			goto repeat;
		if (fault < 0)
			force_sig(SIGKILL, tsk);
		return 0;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace[1]: bad page directory %lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return 0;
	}
	pgmiddle = pmd_offset(pgdir, addr);
	if (pmd_none(*pgmiddle)) {
		fault = handle_mm_fault(tsk, vma, addr, 0);
		if (fault > 0)
			goto repeat;
		if (fault < 0)
			force_sig(SIGKILL, tsk);
		return 0;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace[3]: bad pmd %lx\n", pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return 0;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		fault = handle_mm_fault(tsk, vma, addr, 0);
		if (fault > 0)
			goto repeat;
		if (fault < 0)
			force_sig(SIGKILL, tsk);
		return 0;
	}
	page = pte_page(*pgtable);
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (MAP_NR(page) >= max_mapnr)
		return 0;
	page += addr & ~PAGE_MASK;
	return *(unsigned long *) page;
}
Example #25
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}
Example #26
File: pgtbl.c Project: Neymello/Composite
void 
pgtbl_print_path(paddr_t pgtbl, unsigned long addr)
{
	pgd_t *pt = ((pgd_t *)chal_pa2va((void*)pgtbl)) + pgd_index(addr);
	pte_t *pe = pgtbl_lookup_address(pgtbl, addr);
	
	printk("cos: addr %x, pgd entry - %x, pte entry - %x\n", 
	       (unsigned int)addr, (unsigned int)pgd_val(*pt), (unsigned int)pte_val(*pe));

	return;
}
Example #27
/*
 * This contains the code to setup the memory map on an ARM2/ARM250/ARM3
 * machine. This is both processor & architecture specific, and requires
 * some more work to get it to fit into our separate processor and
 * architecture structure.
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	page_nr = max_low_pfn;

	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}
Example #28
File: init_32.c Project: 274914765/C
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
    pud_t *pud;
    pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
    if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
        pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

        paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        pud = pud_offset(pgd, 0);
        BUG_ON(pmd_table != pmd_offset(pud, 0));
    }
#endif
    pud = pud_offset(pgd, 0);
    pmd_table = pmd_offset(pud, 0);

    return pmd_table;
}
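For context, a condensed sketch of how such a helper is typically driven (simplified and hypothetical, not the verbatim init_32.c loop): the caller walks a range of pgd slots and lets one_md_table_init() supply a pmd table for each.

/* Simplified, hypothetical calling pattern: ensure every pgd slot covering
 * [start, end) has a pmd table that can then be filled in. */
static void __init sketch_init_range(pgd_t *pgd_base, unsigned long start,
                                     unsigned long end)
{
    unsigned long vaddr = start;
    pgd_t *pgd = pgd_base + pgd_index(start);

    for (; vaddr < end; pgd++, vaddr += PGDIR_SIZE) {
        pmd_t *pmd_table = one_md_table_init(pgd);
        /* ... populate pmd_table for the PGDIR_SIZE chunk at vaddr ... */
        (void)pmd_table;
    }
}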
Example #29
/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it.  -M.U-
 */
static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
	unsigned long data)
{
	pgd_t *pgdir;
	pmd_t *pgmiddle;
	pte_t *pgtable;
	unsigned long page;
		
repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (!pgd_present(*pgdir)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return;
	}
	pgmiddle = pmd_offset(pgdir,addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page directory %08lx\n",
		       pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	page = pte_page(*pgtable);
	if (!pte_write(*pgtable)) {
		do_wp_page(tsk, vma, addr, 2);
		goto repeat;
	}
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page < high_memory) {
		*(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
		flush_page_to_ram (page);
	}
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
	*pgtable = pte_mkdirty(mk_pte(page, vma->vm_page_prot));
	flush_tlb_all();
}
Example #30
File: pgtbl.c Project: Neymello/Composite
void 
chal_pgtbl_zero_range(paddr_t pt, unsigned long lower_addr, unsigned long size)
{
	pgd_t *pgd = ((pgd_t *)chal_pa2va((void*)pt)) + pgd_index(lower_addr);
	unsigned int span = hpage_index(size);

	if (!(pgd_val(*pgd)) & _PAGE_PRESENT) {
		printk("cos: BUG: nothing to copy from pgd @ %x.\n", 
		       (unsigned int)lower_addr);
	}

	/* sizeof(pgd entry) is intended */
	memset(pgd, 0, span*sizeof(pgd_t));
}