int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
                            phys_addr_t pa, size_t len, int prot)
{
    u64 *fl_pte;
    u32 fl_offset;
    u32 sl_offset;
    u64 *sl_table;
    u64 *sl_pte;
    u64 upper_attr;
    u64 lower_attr;
    s32 ret;
    u32 redirect = pt->redirect;

    ret = common_error_check(len, pt->fl_table);
    if (ret)
        goto fail;

    if (!pt->fl_table) {
        pr_err("Null page table\n");
        ret = -EINVAL;
        goto fail;
    }

    __get_attr(prot, &upper_attr, &lower_attr);

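    /* Locate the first-level descriptor that covers this VA. */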
    fl_offset = FL_OFFSET(va);
    fl_pte = pt->fl_table + fl_offset;

    ret = handle_1st_lvl(fl_pte, pa, upper_attr, lower_attr, len, redirect);
    if (ret)
        goto fail;

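    /* Follow the first-level entry to its second-level table. */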
    sl_table = FOLLOW_TO_NEXT_TABLE(fl_pte);
    sl_offset = SL_OFFSET(va);
    sl_pte = sl_table + sl_offset;

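    /*
     * 32M and 2M mappings become second-level block entries; 64K and 4K
     * mappings descend to the third level.
     */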
    if (len == SZ_32M)
        ret = sl_32m_map(sl_pte, pa, upper_attr, lower_attr, redirect);
    else if (len == SZ_2M)
        ret = sl_2m_map(sl_pte, pa, upper_attr, lower_attr, redirect);
    else if (len == SZ_64K || len == SZ_4K)
        ret = handle_3rd_lvl(sl_pte, va, pa, upper_attr, lower_attr,
                             len, redirect);

fail:
    return ret;
}
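
/*
 * Walk the first-level entries covered by [va, va + len) and free any
 * second-level tables whose shadow entries show they are no longer in use.
 */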
void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
				 size_t len)
{
	/*
	 * Adding 2 for the worst case: we could be spanning three
	 * second-level tables if we unmapped just over 1MB. For example,
	 * unmapping 1MB + 8KB starting 4KB below a 1MB boundary touches
	 * three consecutive first-level entries.
	 */
	u32 n_entries = len / SZ_1M + 2;
	u32 fl_offset = FL_OFFSET(va);
	u32 i;

	for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) {
		u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset;
		void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF));
		u32 sl_table = *fl_pte_shadow;

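		/*
		 * The low bits of a shadow entry track how many second-level
		 * PTEs are still in use; free the table only once that count
		 * has dropped to zero.
		 */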
		if (sl_table && !(sl_table & 0x1FF)) {
			free_pages((unsigned long) sl_table_va,
				   get_order(SZ_4K));
			*fl_pte_shadow = 0;
		}
		++fl_offset;
	}
}
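
/*
 * Tear down all mappings in [va, va + len), handling 1GB first-level blocks,
 * 2MB second-level blocks and 4KB third-level pages, and freeing page-table
 * pages that become empty along the way.
 */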
static void __msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, u32 va,
        u32 len, u32 silent)
{
    u32 offset = 0;
    u64 *fl_pte;
    u64 *sl_pte;
    u64 *tl_pte;
    u32 fl_offset;
    u32 sl_offset;
    u64 *sl_table;
    u64 *tl_table;
    u32 sl_start, sl_end;
    u32 tl_start, tl_end;
    u32 redirect = pt->redirect;

    BUG_ON(len & (SZ_4K - 1));

    while (offset < len) {
        u32 entries;
        u32 check;
        u32 left_to_unmap = len - offset;
        u32 type;

        fl_offset = FL_OFFSET(va);
        fl_pte = pt->fl_table + fl_offset;

        if (*fl_pte == 0) {
            if (!silent)
                pr_err("First level PTE is 0 at index 0x%x (offset: 0x%x)\n",
                       fl_offset, offset);
            return;
        }
        type = *fl_pte & FLSL_PTE_TYPE_MASK;

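        /* A first-level block descriptor covers a full 1GB region. */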
        if (type == FLSL_TYPE_BLOCK) {
            fl_1G_unmap(fl_pte, redirect);
            va += SZ_1G;
            offset += SZ_1G;
        } else if (type == FLSL_TYPE_TABLE) {
            sl_table = FOLLOW_TO_NEXT_TABLE(fl_pte);
            sl_offset = SL_OFFSET(va);
            sl_pte = sl_table + sl_offset;
            type = *sl_pte & FLSL_PTE_TYPE_MASK;

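            /*
             * Second-level block descriptors cover 2MB each: clear every
             * entry that falls in the remaining range, then let free_table()
             * release the table once it is empty.
             */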
            if (type == FLSL_TYPE_BLOCK) {
                sl_start = sl_offset;
                sl_end = (left_to_unmap / SZ_2M) + sl_start;

                if (sl_end > NUM_SL_PTE)
                    sl_end = NUM_SL_PTE;

                entries = sl_end - sl_start;

                memset(sl_table + sl_start, 0,
                       entries * sizeof(*sl_pte));

                clean_pte(sl_table + sl_start,
                          sl_table + sl_end, redirect);

                /* If we just unmapped the whole table, don't
                 * bother seeing if there are still used
                 * entries left.
                 */
                check = entries != NUM_SL_PTE;

                free_table(fl_pte, sl_table, NUM_SL_PTE,
                           redirect, check);

                offset += entries * SZ_2M;
                va += entries * SZ_2M;
            } else if (type == FLSL_TYPE_TABLE) {
                u32 tbl_freed;

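                /*
                 * 4KB pages live in a third-level table: clear the entries
                 * in range, and if that empties the third-level table (and
                 * in turn the second-level table), free them.
                 */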
                tl_start = TL_OFFSET(va);
                tl_table = FOLLOW_TO_NEXT_TABLE(sl_pte);
                tl_end = (left_to_unmap / SZ_4K) + tl_start;

                if (tl_end > NUM_TL_PTE)
                    tl_end = NUM_TL_PTE;

                entries = tl_end - tl_start;

                memset(tl_table + tl_start, 0,
                       entries * sizeof(*tl_pte));

                clean_pte(tl_table + tl_start,
                          tl_table + tl_end, redirect);

                /* If we just unmapped the whole table, don't
                 * bother seeing if there are still used
                 * entries left.
                 */
                check = entries != NUM_TL_PTE;

                tbl_freed = free_table(sl_pte, tl_table,
                                       NUM_TL_PTE, redirect, check);
                if (tbl_freed)
                    free_table(fl_pte, sl_table, NUM_SL_PTE,
                               redirect, 1);

                offset += entries * SZ_4K;
                va += entries * SZ_4K;
            } else {
                if (!silent)
                    pr_err("Second level PTE (0x%llx) is invalid at index 0x%x (offset: 0x%x)\n",
                           *sl_pte, sl_offset, offset);
                /* Cannot make progress past an invalid entry. */
                return;
            }
        } else {
            if (!silent)
                pr_err("First level PTE (0x%llx) is invalid at index 0x%x (offset: 0x%x)\n",
                       *fl_pte, fl_offset, offset);
            /* Cannot make progress past an invalid entry. */
            return;
        }
    }
}
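
/*
 * Map a scatterlist into the page table starting at va, using the largest
 * block size that each chunk's size and alignment allow.
 */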
s32 msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, u32 va,
                                  struct scatterlist *sg, u32 len, s32 prot)
{
    phys_addr_t pa;
    u32 offset = 0;
    u64 *fl_pte;
    u64 *sl_pte;
    u32 fl_offset;
    u32 sl_offset;
    u64 *sl_table = NULL;
    u32 chunk_size, chunk_offset = 0;
    s32 ret = 0;
    u64 up_at;
    u64 lo_at;
    u32 redirect = pt->redirect;
    u32 start_va = va;

    BUG_ON(len & (SZ_4K - 1));

    if (!pt->fl_table) {
        pr_err("Null page table\n");
        ret = -EINVAL;
        goto fail;
    }

    __get_attr(prot, &up_at, &lo_at);

    pa = get_phys_addr(sg);

    while (offset < len) {
        u32 chunk_left = sg->length - chunk_offset;

        fl_offset = FL_OFFSET(va);
        fl_pte = pt->fl_table + fl_offset;

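        /*
         * Pick the largest mapping size for which the VA, PA and the
         * remainder of the current scatterlist entry are all aligned.
         */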
        chunk_size = SZ_4K;
        if (is_fully_aligned(va, pa, chunk_left, SZ_1G))
            chunk_size = SZ_1G;
        else if (is_fully_aligned(va, pa, chunk_left, SZ_32M))
            chunk_size = SZ_32M;
        else if (is_fully_aligned(va, pa, chunk_left, SZ_2M))
            chunk_size = SZ_2M;
        else if (is_fully_aligned(va, pa, chunk_left, SZ_64K))
            chunk_size = SZ_64K;

        trace_iommu_map_range(va, pa, sg->length, chunk_size);

        ret = handle_1st_lvl(fl_pte, pa, up_at, lo_at,
                             chunk_size, redirect);
        if (ret)
            goto fail;

        sl_table = FOLLOW_TO_NEXT_TABLE(fl_pte);
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (chunk_size == SZ_32M)
            ret = sl_32m_map(sl_pte, pa, up_at, lo_at, redirect);
        else if (chunk_size == SZ_2M)
            ret = sl_2m_map(sl_pte, pa, up_at, lo_at, redirect);
        else if (chunk_size == SZ_64K || chunk_size == SZ_4K)
            ret = handle_3rd_lvl(sl_pte, va, pa, up_at, lo_at,
                                 chunk_size, redirect);
        if (ret)
            goto fail;

        offset += chunk_size;
        chunk_offset += chunk_size;
        va += chunk_size;
        pa += chunk_size;

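        /* Finished with this scatterlist entry; advance to the next one. */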
        if (chunk_offset >= sg->length && offset < len) {
            chunk_offset = 0;
            sg = sg_next(sg);
            pa = get_phys_addr(sg);
        }
    }
fail:
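    /* Undo any partial mapping on failure. */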
    if (ret && offset > 0)
        __msm_iommu_pagetable_unmap_range(pt, start_va, offset, 1);
    return ret;
}