Example #1
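/*
 * Walks the PML4 entries that cover [from, to). For every present entry the
 * lower levels are released first, then the PML3 table page gives up one
 * reference per 4K page unmapped in this range; once its u.refcount drops to
 * zero the entry is cleared and the table page is freed.
 */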
static void pt_release_pml4(pte_t *pml4, virt_t from, virt_t to)
{
	virt_t vaddr = from;

	for (int i = pml4_i(from); vaddr != to; ++i) {
		const pte_t pte = pml4[i];
		const virt_t bytes =
			MINU(PML3_SIZE - (vaddr & PML3_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

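		/* Only present entries reference a PML3 table that may need releasing. */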
		if (pte_present(pte)) {
			const phys_t paddr = pte_phys(pte);
			const pfn_t pfn = paddr >> PAGE_BITS;
			struct page *pt = pfn2page(pfn);

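			/* Recurse into the PML3 table, then drop this range's references to it. */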
			pt_release_pml3(va(paddr), vaddr, vaddr + bytes);
			pt->u.refcount -= pages;

			if (pt->u.refcount == 0) {
				pml4[i] = 0;
				free_page_table(pt);
			}
		}
		vaddr += bytes;
	}
}
Example #2
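/*
 * Populates the PML2 entries covering [from, to). A missing entry gets a
 * freshly allocated PML1 table (unless a large mapping is requested); an
 * existing non-large entry only has its table's u.refcount raised by the
 * number of 4K pages mapped through it. On allocation failure everything
 * populated so far is rolled back and -ENOMEM is returned.
 */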
static int pt_populate_pml2(pte_t *pml2, virt_t from, virt_t to, pte_t flags)
{
	virt_t vaddr = from;

	for (int i = pml2_i(from); vaddr != to; ++i) {
		const virt_t bytes =
			MINU(PML1_SIZE - (vaddr & PML1_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

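		/* No PML1 table here yet and no large mapping requested: allocate one. */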
		if (!pte_present(pml2[i]) && !pte_large(flags)) {
			struct page *pt = alloc_page_table(flags);

			if (!pt) {
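				/* Roll back the entries populated so far before failing. */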
				pt_release_pml2(pml2, from, vaddr);
				return -ENOMEM;
			}

			pt->u.refcount += pages;
			pml2[i] = page_paddr(pt) | flags;
		} else if (pte_present(pml2[i]) && !pte_large(pml2[i])) {
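			/* The PML1 table already exists: just take extra references for this range. */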
			const pfn_t pfn = pte_phys(pml2[i]) >> PAGE_BITS;
			struct page *pt = pfn2page(pfn);

			pt->u.refcount += pages;
		}
		vaddr += bytes;
	}

	return 0;
}
Example #3
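/*
 * Maps an object pointer back to the slab it belongs to: the pointer's
 * physical frame selects a struct page, whose u.slab field stores the owning
 * kmem_slab.
 */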
static struct kmem_slab *kmem_get_slab(void *ptr)
{
	const pfn_t pfn = PA(ptr) >> PAGE_BITS;

	return pfn2page(pfn)->u.slab;
}