Example #1
struct pt_iter *pt_iter_set(struct pt_iter *iter, pte_t *pml4, virt_t addr)
{
	memset(iter, 0, sizeof(*iter));

	if (!pml4)
		return iter;

	int level = PT_MAX_LEVEL;
	int idx = pml4_i(addr);
	pte_t pte = pml4[idx];

	iter->pt[level] = pml4;
	iter->idx[level] = idx;

	/* Walk down while the entry is present and does not map a large page,
	 * recording the table and index visited at each level. */
	while (pte_present(pte) && level != 0 && !pte_large(pte)) {
		const phys_t paddr = pte_phys(pte);
		pte_t *pt = va(paddr);

		idx = pt_index(addr, level--);
		pte = pt[idx];

		iter->idx[level] = idx;
		iter->pt[level] = pt;
	}

	iter->addr = pt_iter_addr(iter);
	iter->level = level;

	return iter;
}
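The iterator structure itself is not shown in this snippet; the following is a plausible shape inferred from the accesses above (PT_MAX_LEVEL, pt_iter_addr and the pt/idx arrays come from the code, but the exact field layout is an assumption):

/* Hypothetical definition of struct pt_iter, inferred from pt_iter_set. */
struct pt_iter {
	pte_t *pt[PT_MAX_LEVEL + 1];	/* page table touched at each level */
	int idx[PT_MAX_LEVEL + 1];	/* entry index within pt[level] */
	virt_t addr;			/* address the iterator currently points at */
	int level;			/* lowest level reached by the walk */
};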
Example #2
static void pt_release_pml4(pte_t *pml4, virt_t from, virt_t to)
{
	virt_t vaddr = from;

	for (int i = pml4_i(from); vaddr != to; ++i) {
		const pte_t pte = pml4[i];
		const virt_t bytes =
			MINU(PML3_SIZE - (vaddr & PML3_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (pte_present(pte)) {
			const phys_t paddr = pte_phys(pte);
			const pfn_t pfn = paddr >> PAGE_BITS;
			struct page *pt = pfn2page(pfn);

			/* Release the covered range in the level below, then drop
			 * this table's references; free it once nothing maps through it. */
			pt_release_pml3(va(paddr), vaddr, vaddr + bytes);
			pt->u.refcount -= pages;

			if (pt->u.refcount == 0) {
				pml4[i] = 0;
				free_page_table(pt);
			}
		}
		vaddr += bytes;
	}
}
Example #3
phys_t get_phys_adr(virt_t vad) {
    phys_t pad;
    /* Walk PML4 -> PDPT -> PD for the given virtual address. */
    pte_t *pml4e = pml4 + pml4_i(vad);
    pte_t *pdpte = ((pte_t *) va(pte_phys(*pml4e) << 12)) + pml3_i(vad);
    pte_t *pde = ((pte_t *) va(pte_phys(*pdpte) << 12)) + pml2_i(vad);
    if (pte_large(*pde)) {
        /* 2 MiB page: clear the low 21 bits of the PDE and add the page offset. */
        pad = (*pde & ~((1 << 21) - 1)) | (vad & ((1 << 21) - 1));
        return pad;
    }

    /* 4 KiB page: descend to the page table and combine with the 12-bit offset. */
    pte_t *pte = ((pte_t *) va(pte_phys(*pde) << 12)) + pml1_i(vad);
    pad = (*pte & ~((1 << 12) - 1)) | (vad & ((1 << 12) - 1));
    return pad;
}
Example #4
static int pt_populate_pml4(pte_t *pml4, virt_t from, virt_t to, pte_t flags)
{
	virt_t vaddr = from;

	for (int i = pml4_i(from); vaddr < to; ++i) {
		struct page *pt;
		phys_t paddr;
		const virt_t bytes =
			MINU(PML3_SIZE - (vaddr & PML3_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (!pte_present(pml4[i])) {
			pt = alloc_page_table(flags);

			if (!pt) {
				pt_release_pml4(pml4, from, vaddr);
				return -ENOMEM;
			}

			paddr = page_paddr(pt);
			pml4[i] = paddr | (flags & ~PTE_LARGE);
		} else {
			const pte_t pte = pml4[i];

			paddr = pte_phys(pte);
			pt = pfn2page(paddr >> PAGE_BITS);
		}
		/* Each page mapped beneath this entry holds a reference on its table. */
		pt->u.refcount += pages;

		const int rc = pt_populate_pml3(va(paddr), vaddr, vaddr + bytes,
					flags);

		if (rc) {
			/* Unwind: release what was populated so far, drop this
			 * entry's references and free the table if now unused. */
			pt_release_pml4(pml4, from, vaddr);
			pt->u.refcount -= pages;

			if (pt->u.refcount == 0) {
				pml4[i] = 0;
				free_page_table(pt);
			}

			return rc;
		}

		vaddr += bytes;
	}

	return 0;
}
Example #5
static int pt_index(virt_t vaddr, int level)
{
	switch (level) {
	case 4:
		return pml4_i(vaddr);
	case 3:
		return pml3_i(vaddr);
	case 2:
		return pml2_i(vaddr);
	case 1:
		return pml1_i(vaddr);
	}

	DBG_ASSERT(0 && "Unreachable");
	return 0;
}
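All of these snippets rely on per-level index helpers such as pml4_i(); pt_index() above simply dispatches to them. A minimal sketch of how they are commonly defined, assuming the standard x86-64 4-level layout (a 12-bit page offset followed by 9 index bits per level) rather than either project's actual headers:

/* Hypothetical per-level index helpers for x86-64 4-level paging. */
#define pml1_i(addr)	(((addr) >> 12) & 0x1ff)	/* page-table index     */
#define pml2_i(addr)	(((addr) >> 21) & 0x1ff)	/* page-directory index */
#define pml3_i(addr)	(((addr) >> 30) & 0x1ff)	/* PDPT index           */
#define pml4_i(addr)	(((addr) >> 39) & 0x1ff)	/* PML4 index           */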
Example #6
void map_adr(virt_t vad, phys_t pad, int flags) {
    /* A 2 MiB mapping needs 2 MiB-aligned addresses, a 4 KiB mapping 4 KiB-aligned ones. */
    if (flags & USE_BIG_PAGE) {
        assert((vad & ((1 << (12 + 9)) - 1)) == 0);
        assert((pad & ((1 << (12 + 9)) - 1)) == 0);
    } else {
        assert((vad & ((1 << 12) - 1)) == 0);
        assert((pad & ((1 << 12) - 1)) == 0);
    }

    /* force_pte (not shown here) is expected to ensure the next-level
     * table is present before we descend into it. */
    pte_t *pml4e = pml4 + pml4_i(vad);

    force_pte(pml4e, flags);

    pte_t *pdpte = ((pte_t *) va(pte_phys(*pml4e) << 12)) + pml3_i(vad);

    force_pte(pdpte, flags);

    pte_t *pde = ((pte_t *) va(pte_phys(*pdpte) << 12)) + pml2_i(vad);
    if (flags & USE_BIG_PAGE) {
        /* Install the mapping directly in the PDE as a 2 MiB large page. */
        assert(pte_present(*pde) == false);
        *pde = pad | PTE_PRESENT | PTE_WRITE | PTE_LARGE;

        flush_tlb_addr(vad);
        return;
    }

    force_pte(pde, flags);

    /* Regular 4 KiB mapping: install the frame in the last-level PTE. */
    pte_t *pte = ((pte_t *) va(pte_phys(*pde) << 12)) + pml1_i(vad);
    assert(pte_present(*pte) == false);
    *pte = pad | PTE_PRESENT | PTE_WRITE;

    if (!(flags & NOT_FLUSH_TLB)) {
        flush_tlb_addr(vad);
    }
}
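A hypothetical call sequence for this function, assuming the pml4 it walks is already active and the addresses below are unmapped and suitably aligned (both addresses are made up for illustration):

/* Map one 2 MiB page; both addresses must be 2 MiB aligned. */
map_adr(0xffffffff80200000UL, 0x200000UL, USE_BIG_PAGE);

/* Map one 4 KiB page and skip the TLB flush, e.g. when batching mappings. */
map_adr(0xffffffff80400000UL, 0x400000UL, NOT_FLUSH_TLB);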
Example #7
File: paging.c Project: imbd/OS
static inline pte_t * __pml4e(pte_t * pml4, virt_t addr) {
	return pml4 + pml4_i(addr);
}