Example #1: pt_populate_pml2(), which allocates missing PML1 tables for a virtual range (unless a large mapping is requested), updates their reference counts, and rolls back on allocation failure.
static int pt_populate_pml2(pte_t *pml2, virt_t from, virt_t to, pte_t flags)
{
	virt_t vaddr = from;

	for (int i = pml2_i(from); vaddr != to; ++i) {
		/* bytes until the next PML1_SIZE boundary, capped at the end
		 * of the requested range */
		const virt_t bytes =
			MINU(PML1_SIZE - (vaddr & PML1_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (!pte_present(pml2[i]) && !pte_large(flags)) {
			struct page *pt = alloc_page_table(flags);

			if (!pt) {
				pt_release_pml2(pml2, from, vaddr);
				return -ENOMEM;
			}

			pt->u.refcount += pages;
			pml2[i] = page_paddr(pt) | flags;
		} else if (pte_present(pml2[i]) && !pte_large(pml2[i])) {
			const pfn_t pfn = pte_phys(pml2[i]) >> PAGE_BITS;
			struct page *pt = pfn2page(pfn);

			pt->u.refcount += pages;
		}
		vaddr += bytes;
	}

	return 0;
}
Example #2: setup_paging(), which allocates the root page table, installs the fixed, kernel, and kmap mappings, and loads the new PML4.
void setup_paging(void)
{
	struct page *page = alloc_page_table(PTE_LOW);

	DBG_ASSERT(page != 0);

	const phys_t paddr = page_paddr(page);
	pte_t *pt = va(paddr);

	DBG_ASSERT(setup_fixed_mapping(pt) == 0);
	DBG_ASSERT(setup_kernel_mapping(pt) == 0);
	DBG_ASSERT(setup_kmap_mapping(pt) == 0);
	store_pml4(paddr);
}
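
Example #2 turns the new table's physical address into a usable pointer with va() before handing it to the setup helpers. Below is a minimal sketch of that direct-mapping idea; the function name and the base constant are assumptions for illustration, not taken from the kernel shown above.

#include <stdint.h>

/* Assumed higher-half direct-map base; purely illustrative. */
#define DIRECT_MAP_BASE 0xffff800000000000ULL

/* Sketch of what va() plausibly does: offset a physical address into
 * the kernel's linear mapping of physical memory. */
static inline void *va_sketch(uint64_t paddr)
{
	return (void *)(uintptr_t)(paddr + DIRECT_MAP_BASE);
}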
Example #3: pt_populate_pml4(), which allocates missing PML3 tables for a virtual range, recurses into pt_populate_pml3(), and unwinds the reference counts on failure.
static int pt_populate_pml4(pte_t *pml4, virt_t from, virt_t to, pte_t flags)
{
	virt_t vaddr = from;

	for (int i = pml4_i(from); vaddr < to; ++i) {
		struct page *pt;
		phys_t paddr;
		/* bytes until the next PML3_SIZE boundary, capped at the end
		 * of the requested range */
		const virt_t bytes =
			MINU(PML3_SIZE - (vaddr & PML3_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (!pte_present(pml4[i])) {
			pt = alloc_page_table(flags);

			if (!pt) {
				pt_release_pml4(pml4, from, vaddr);
				return -ENOMEM;
			}

			paddr = page_paddr(pt);
			pml4[i] = paddr | (flags & ~PTE_LARGE);
		} else {
			const pte_t pte = pml4[i];

			paddr = pte_phys(pte);
			pt = pfn2page(paddr >> PAGE_BITS);
		}
		pt->u.refcount += pages;

		const int rc = pt_populate_pml3(va(paddr), vaddr, vaddr + bytes,
					flags);

		if (rc) {
			pt_release_pml4(pml4, from, vaddr);
			pt->u.refcount -= pages;

			if (pt->u.refcount == 0) {
				pml4[i] = 0;
				free_page_table(pt);
			}

			return rc;
		}

		vaddr += bytes;
	}

	return 0;
}
Example #4: mmu_map(), which lazily allocates the ISP MMU's L1 page table on first use, installs it as the page-directory base, and maps a run of pages through mmu_l1_map() under pt_mutex.
/*
 * Update page table according to isp virtual address and page physical
 * address
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/*
		 * allocate 1 new page for L1 page table
		 */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "alloc page table fail.\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/*
		 * setup L1 page table physical addr to MMU
		 */
		ret = mmu->driver->set_pd_base(mmu, l1_pt);
		if (ret) {
			dev_err(atomisp_dev,
				 "set page directory base address fail.\n");
			mutex_unlock(&mmu->pt_mutex);
			return ret;
		}
		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);

	if (ret)
		dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}
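
The range set up in mmu_map() covers pgnr ISP pages starting at the page-aligned isp_virt. The sketch below spells out that arithmetic; the 12-bit page size and the macro names are assumptions, not values read from the driver's headers.

/* Hypothetical stand-ins for the driver's ISP_PAGE_OFFSET / ISP_PAGE_MASK. */
#define EXAMPLE_ISP_PAGE_OFFSET 12
#define EXAMPLE_ISP_PAGE_MASK   (~((1U << EXAMPLE_ISP_PAGE_OFFSET) - 1))

/* End of the mapping: pgnr pages of (1 << EXAMPLE_ISP_PAGE_OFFSET) bytes
 * each, counted from the page-aligned start address. */
static unsigned int example_range_end(unsigned int isp_virt, unsigned int pgnr)
{
	unsigned int start = isp_virt & EXAMPLE_ISP_PAGE_MASK;

	return start + (pgnr << EXAMPLE_ISP_PAGE_OFFSET);
}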
Example #5: arm64_mmu_get_page_table(), which returns the next-level table for a page-table entry, allocating and installing a fresh table when the entry is invalid and refusing to descend into block mappings.
static pte_t *arm64_mmu_get_page_table(vaddr_t index, uint page_size_shift, pte_t *page_table)
{
    pte_t pte;
    paddr_t paddr;
    void *vaddr;
    int ret;

    pte = page_table[index];
    switch (pte & MMU_PTE_DESCRIPTOR_MASK) {
    case MMU_PTE_DESCRIPTOR_INVALID:
        ret = alloc_page_table(&paddr, page_size_shift);
        if (ret) {
            TRACEF("failed to allocate page table\n");
            return NULL;
        }
        vaddr = paddr_to_kvaddr(paddr);
        LTRACEF("allocated page table, vaddr %p, paddr 0x%lx\n", vaddr, paddr);
        memset(vaddr, MMU_PTE_DESCRIPTOR_INVALID, 1U << page_size_shift);
        __asm__ volatile("dmb ishst" ::: "memory");
        pte = paddr | MMU_PTE_L012_DESCRIPTOR_TABLE;
        page_table[index] = pte;
        LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
        return vaddr;

    case MMU_PTE_L012_DESCRIPTOR_TABLE:
        paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
        LTRACEF("found page table 0x%lx\n", paddr);
        return paddr_to_kvaddr(paddr);

    case MMU_PTE_L012_DESCRIPTOR_BLOCK:
        return NULL;

    default:
        PANIC_UNIMPLEMENTED;
    }
}
Example #6: mmu_l1_map(), which walks the L1 entries covering a range, allocating L2 page tables for invalid entries, delegating each chunk to mmu_l2_map(), and freeing the partial mapping on failure.
/*
 * Update L1 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					     "alloc page table fail.\n");

				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					   l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					   l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}
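
All six examples call an alloc_page_table() helper, but each code base defines its own. The declarations below are reconstructed from the call sites above as a quick reference; they are inferences rather than the projects' actual headers and would never coexist in a single translation unit.

/* Examples #1-#3: returns the struct page backing the new table,
 * or NULL on allocation failure. */
struct page *alloc_page_table(pte_t flags);

/* Examples #4 and #6: returns the new table's physical address,
 * or NULL_PAGE on failure. */
phys_addr_t alloc_page_table(struct isp_mmu *mmu);

/* Example #5: stores the new table's physical address via *paddrp and
 * returns zero on success, nonzero on failure. */
int alloc_page_table(paddr_t *paddrp, uint page_size_shift);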