Example 1
/* Free the L1 page table and all valid L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			    (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);

			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);

	kmem_cache_destroy(mmu->tbl_cache);
}
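
All of these examples decode an ISP virtual address with a fixed two-level layout. As a reading aid, here is a minimal sketch of the helper macros they assume (illustrative values modelled on the driver's isp_mmu.h: 4 KiB pages and 1024-entry tables, so bits [31:22] select the L1 entry and bits [21:12] the L2 entry):

/*
 * Illustrative sketch only -- values assume 4 KiB pages and 1024-entry
 * L1/L2 tables; the real driver defines these in isp_mmu.h.
 */
#define ISP_PAGE_OFFSET		12
#define ISP_PAGE_SIZE		(1U << ISP_PAGE_OFFSET)
#define ISP_PAGE_MASK		(~(phys_addr_t)(ISP_PAGE_SIZE - 1))
#define ISP_PAGE_ALIGN(x)	(((x) + (ISP_PAGE_SIZE - 1)) & ISP_PAGE_MASK)

#define ISP_L1PT_OFFSET		22	/* one L1 entry covers 4 MiB */
#define ISP_L2PT_OFFSET		12	/* one L2 entry covers one page */

#define ISP_L1PT_PTES		1024
#define ISP_L2PT_PTES		1024

/* bits [31:22] -> L1 index, bits [21:12] -> L2 index */
#define ISP_PTR_TO_L1_IDX(x)	((x) >> ISP_L1PT_OFFSET)
#define ISP_PTR_TO_L2_IDX(x)	(((x) >> ISP_L2PT_OFFSET) & (ISP_L2PT_PTES - 1))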
Example 2
/*
 * Unmap a range of ISP virtual addresses within one L2 page table,
 * freeing the table once its refcount drops to zero
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			   unsigned int l1_idx, phys_addr_t l2_pt,
			   unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
						 l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}
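
To make the loop bounds concrete, a small stand-alone example (hypothetical addresses, same 4 KiB/1024-entry layout as the sketch above) printing which entries an unmap of [0x00403000, 0x00406000) would clear:

#include <stdio.h>

/*
 * Hypothetical walk: which L1/L2 indices does unmapping the range
 * [0x00403000, 0x00406000) touch? (three 4 KiB pages)
 */
int main(void)
{
	unsigned int start = 0x00403000, end = 0x00406000;
	unsigned int ptr;

	for (ptr = start; ptr < end; ptr += 1U << 12)
		printf("clear L1 idx %u, L2 idx %u\n",
		       ptr >> 22, (ptr >> 12) & 1023);

	return 0;
}

This prints L1 index 1 with L2 indices 3, 4, 5 -- one refcount decrement per page, with the L2 table itself freed only if its refcount reaches zero.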
Example 3
/* Clean up empty L2 page tables */
void isp_mmu_clean_l2(struct isp_mmu *mmu)
{
	unsigned int idx, idx2;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			    (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		bool l2_pt_is_empty = true;

		pte = atomisp_get_pte(l1_pt, idx);
		if (!ISP_PTE_VALID(mmu, pte))
			continue;

		l2_pt = isp_pte_to_pgaddr(mmu, pte);
		for (idx2 = 0; idx2 < ISP_L2PT_PTES; idx2++) {
			if (atomisp_get_pte(l2_pt, idx2) !=
			    mmu->driver->null_pte) {
				l2_pt_is_empty = false;
				break;
			}
		}

		if (l2_pt_is_empty) {
			free_page_table(mmu, l2_pt);
			atomisp_set_pte(l1_pt, idx, mmu->driver->null_pte);
			dev_dbg(atomisp_dev, "free l1_pte index %d\n", idx);
		}
	}
}
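
Every example reads and writes page-table entries through atomisp_get_pte()/atomisp_set_pte(). A plausible minimal implementation, assuming a page table is a physically addressed array of 32-bit PTEs in kernel-mapped memory (sketch only; the real accessors may also need CPU cache flushes, since the ISP fetches the tables over DMA):

#include <linux/io.h>	/* phys_to_virt() */

/* Sketch: read one PTE from the table at physical address 'pt'. */
static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return pt_virt[idx];
}

/* Sketch: write one PTE into the table at physical address 'pt'. */
static void atomisp_set_pte(phys_addr_t pt, unsigned int idx,
			    unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	pt_virt[idx] = pte;
}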
Example 4
/*
 * Unmap a range of ISP virtual addresses by walking the L1 page table
 * and handing each L1-entry-sized chunk to mmu_l2_unmap()
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			   unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/*
			 * skip this L1 entry; without advancing ptr the
			 * do/while would spin on the same entry forever
			 */
			ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
		/*
		 * use the same L2 page next time, so we don't
		 * need to invalidate and free this PT.
		 */
		/*      atomisp_set_pte(l1_pt, idx, NULL_PTE); */
	} while (ptr < end && idx < ISP_L1PT_PTES);
}
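
The map routines below (Examples 5 and 6) recover from partial failures via free_mmu_map(). A plausible sketch of that cleanup path, assuming it converts the partially mapped [start, end) byte range into whole pages and unmaps them through mmu_l1_unmap() above (helper names and the mmu->base_address field are inferred from the call sites, not a verified API):

/* Sketch (assumed helper): unmap 'pgnr' pages starting at isp_virt. */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start = isp_virt & ISP_PAGE_MASK;
	unsigned int end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, mmu->base_address, start, end);
}

/*
 * Sketch: error-path cleanup used by the map routines -- convert the
 * partially mapped byte range back into a page count and unmap it.
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start,
			 unsigned int end)
{
	unsigned int pgnr = (end - start) >> ISP_PAGE_OFFSET;

	mmu_unmap(mmu, start, pgnr);
}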
Example 5
/*
 * Map a physically contiguous range into one L2 page table for the
 * given ISP virtual address range
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			mmu_remap_error(mmu, l1_pt, l1_idx,
					  l2_pt, idx, ptr, pte, phys);

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);

		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}
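
PTE encoding and decoding are delegated to the hardware-specific driver. A plausible sketch of the two conversion helpers used throughout, assuming the driver object supplies phys_to_pte/pte_to_phys callbacks plus a valid-bit mask (an assumed interface, inferred from how the examples use these functions):

/* Sketch: decode a PTE into the physical page address it points at. */
static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu, unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);	/* assumed callback */
}

/* Sketch: encode a physical page address as a PTE and mark it valid. */
static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
					    phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);	/* assumed */

	return pte | mmu->driver->pte_valid_mask;	/* assumed field */
}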
Example 6
/*
 * Map a physically contiguous range by walking the L1 page table,
 * allocating L2 page tables on demand
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"failed to allocate page table\n");

				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					   l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					   l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev,
				"failed to set up mapping in L2PT\n");

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}
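
To show how the pieces compose, here is a sketch of a plausible top-level map entry point: allocate the root L1 table lazily on first use, then hand the whole byte range to mmu_l1_map(). Names follow the examples above; the lazy-allocation details and the mmu->base_address field are assumptions, not verified driver code.

/*
 * Sketch of a top-level map: create the L1 table on first use, then
 * delegate the whole [start, end) range to mmu_l1_map().
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		l1_pt = alloc_page_table(mmu);	/* root L1 table */
		if (l1_pt == NULL_PAGE)
			return -ENOMEM;

		mmu->base_address = l1_pt;	/* assumed field, see above */
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		/* no L2 tables exist yet, so all refcounts start at zero */
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = isp_virt & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	return mmu_l1_map(mmu, l1_pt, start, end, phys);
}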