static inline u64 *make_second_level_tbl(s32 redirect, u64 *fl_pte)
{
	u64 *sl = (u64 *) __get_free_page(GFP_KERNEL);

	if (!sl) {
		pr_err("Could not allocate second level table\n");
		goto fail;
	}

	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, redirect);

	/* Leave APTable bits 0 to let next level decide access permissions */
	*fl_pte = (((phys_addr_t)__pa(sl)) & FLSL_BASE_MASK) | FLSL_TYPE_TABLE;
	clean_pte(fl_pte, fl_pte + 1, redirect);
fail:
	return sl;
}
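/*
 * The functions in this listing lean on a handful of helpers and
 * constants from the msm_iommu pagetable headers. The values below are
 * a hedged sketch based on the ARMv7 LPAE long-descriptor format, not
 * the authoritative definitions; verify them against the real headers
 * before reuse.
 */
#define NUM_FL_PTE	4	/* first level: 4 x 1GB entries (assumed) */
#define NUM_SL_PTE	512	/* second level: 512 x 2MB entries (assumed) */
#define NUM_TL_PTE	512	/* third level: 512 x 4KB entries (assumed) */
#define FLSL_TYPE_BLOCK	1	/* bits[1:0] = 0b01: block descriptor */
#define FLSL_TYPE_TABLE	3	/* bits[1:0] = 0b11: table descriptor */
#define FLSL_PTE_TYPE_MASK	3
#define TL_TYPE_PAGE	3	/* third-level 4K page descriptor */
#define TL_CH		(1ULL << 52)	/* contiguous hint bit (assumed) */

/*
 * Sketch of clean_pte(): when the SMMU does not redirect its page-table
 * walks through the CPU caches (redirect == 0), updated PTEs have to be
 * cleaned out to memory so the hardware walker sees them. A minimal
 * sketch, not the real helper.
 */
static inline void clean_pte(void *start, void *end, s32 redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}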
int msm_iommu_pagetable_alloc(struct iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							 get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}
/* Variant with page-table memory accounting */
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							 get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	/* Account the 16K first-level table in the meminfo totals */
	add_meminfo_total_pages(NR_IOMMU_PAGETABLES_PAGES,
				1 << get_order(SZ_16K));

	return 0;
}
static inline s32 tl_4k_map(u64 *tl_pte, phys_addr_t pa, u64 upper_attr,
			    u64 lower_attr, s32 redirect)
{
	s32 ret = 0;

	if (*tl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*tl_pte = upper_attr | (pa & TL_PAGE_MASK) | lower_attr | TL_TYPE_PAGE;
	clean_pte(tl_pte, tl_pte + 1, redirect);
fail:
	return ret;
}
static inline s32 sl_2m_map(u64 *sl_pte, phys_addr_t pa, u64 upper_attr,
			    u64 lower_attr, s32 redirect)
{
	s32 ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = upper_attr | (pa & FLSL_BLOCK_MASK) | lower_attr |
		  FLSL_TYPE_BLOCK;
	clean_pte(sl_pte, sl_pte + 1, redirect);
fail:
	return ret;
}
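/*
 * Hypothetical sketch of the VA-decomposition macros the unmap path
 * below relies on, assuming 32-bit IOVAs and a 4K granule under LPAE:
 * bits [31:30] pick the first-level entry, [29:21] the second, and
 * [20:12] the third. FOLLOW_TO_NEXT_TABLE chases a table descriptor to
 * the virtual address of the next-level table. The masks are
 * assumptions, not copied from the original headers.
 */
#define FL_OFFSET(va)		(((va) & 0xC0000000) >> 30)
#define SL_OFFSET(va)		(((va) & 0x3FE00000) >> 21)
#define TL_OFFSET(va)		(((va) & 0x001FF000) >> 12)
#define FLSL_BASE_MASK		0xFFFFFFF000ULL
#define FLSL_BLOCK_MASK		0xFFFFFFF000ULL	/* simplified (assumed) */
#define TL_PAGE_MASK		0xFFFFFFF000ULL
#define FOLLOW_TO_NEXT_TABLE(pte) ((u64 *) __va(*(pte) & FLSL_BASE_MASK))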
/* Variant with LGE per-zone IOMMU page accounting */
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							 get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

#ifdef CONFIG_LGE_MEMORY_INFO
	__mod_zone_page_state(page_zone(virt_to_page((void *)pt->fl_table)),
			      NR_IOMMU_PAGES, (1UL << get_order(SZ_16K)));
#endif

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}
/* LPAE variant: the first level holds only NUM_FL_PTE entries, so it is
 * carved out of a kzalloc'd buffer and aligned by hand. */
s32 msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	u32 size = PTE_SIZE * NUM_FL_PTE + FL_ALIGN;
	phys_addr_t fl_table_phys;

	pt->unaligned_fl_table = kzalloc(size, GFP_KERNEL);
	if (!pt->unaligned_fl_table)
		return -ENOMEM;

	fl_table_phys = virt_to_phys(pt->unaligned_fl_table);
	fl_table_phys = ALIGN(fl_table_phys, FL_ALIGN);
	pt->fl_table = phys_to_virt(fl_table_phys);

	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}
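/*
 * Because the aligned first-level pointer is derived from the buffer,
 * only the unaligned pointer is valid for kfree(). A hedged sketch of
 * the matching free path; the real one presumably also walks the first
 * level and frees any remaining lower-level tables.
 */
void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
	kfree(pt->unaligned_fl_table);
	pt->unaligned_fl_table = NULL;
	pt->fl_table = NULL;
}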
static u32 free_table(u64 *prev_level_pte, u64 *table, u32 table_len,
		      s32 redirect, u32 check)
{
	u32 i;
	u32 used = 0;

	if (check) {
		for (i = 0; i < table_len; ++i)
			if (table[i]) {
				used = 1;
				break;
			}
	}

	if (!used) {
		/* Cast through unsigned long so this also works where
		 * pointers are wider than 32 bits. */
		free_page((unsigned long)table);
		*prev_level_pte = 0;
		clean_pte(prev_level_pte, prev_level_pte + 1, redirect);
	}

	return !used;
}
static inline s32 tl_64k_map(u64 *tl_pte, phys_addr_t pa, u64 upper_attr,
			     u64 lower_attr, s32 redirect)
{
	s32 ret = 0;
	s32 i;

	for (i = 0; i < 16; ++i)
		if (*(tl_pte + i)) {
			ret = -EBUSY;
			goto fail;
		}

	/* Add the contiguous hint TL_CH */
	upper_attr |= TL_CH;

	for (i = 0; i < 16; ++i)
		*(tl_pte + i) = upper_attr | (pa & TL_PAGE_MASK) |
				lower_attr | TL_TYPE_PAGE;
	clean_pte(tl_pte, tl_pte + 16, redirect);
fail:
	return ret;
}
static inline s32 sl_32m_map(u64 *sl_pte, phys_addr_t pa, u64 upper_attr,
			     u64 lower_attr, s32 redirect)
{
	s32 i;
	s32 ret = 0;

	for (i = 0; i < 16; ++i) {
		if (*(sl_pte + i)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	/* Add the contiguous hint TL_CH */
	upper_attr |= TL_CH;

	for (i = 0; i < 16; ++i)
		*(sl_pte + i) = upper_attr | (pa & FLSL_BLOCK_MASK) |
				lower_attr | FLSL_TYPE_BLOCK;
	clean_pte(sl_pte, sl_pte + 16, redirect);
fail:
	return ret;
}
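/*
 * A hypothetical sketch (not in the original file) of how a map routine
 * can dispatch across the four helpers above, picking the largest
 * mapping that the alignment of va/pa and the remaining length allow.
 * The function name and attribute plumbing are invented for
 * illustration, and the lower-level tables are assumed to exist already
 * (see make_second_level_tbl above for how one is created).
 */
static s32 map_one_chunk(struct msm_iommu_pt *pt, u64 *fl_pte, u32 va,
			 phys_addr_t pa, u32 len,
			 u64 upper_attr, u64 lower_attr)
{
	u64 *sl_pte = FOLLOW_TO_NEXT_TABLE(fl_pte) + SL_OFFSET(va);
	u64 *tl_pte;

	/* 16 contiguous 2M entries: one 32M block mapping */
	if (len >= SZ_32M && IS_ALIGNED(va | (u32)pa, SZ_32M))
		return sl_32m_map(sl_pte, pa, upper_attr, lower_attr,
				  pt->redirect);
	if (len >= SZ_2M && IS_ALIGNED(va | (u32)pa, SZ_2M))
		return sl_2m_map(sl_pte, pa, upper_attr, lower_attr,
				 pt->redirect);

	tl_pte = FOLLOW_TO_NEXT_TABLE(sl_pte) + TL_OFFSET(va);
	/* 16 contiguous 4K entries: one 64K mapping */
	if (len >= SZ_64K && IS_ALIGNED(va | (u32)pa, SZ_64K))
		return tl_64k_map(tl_pte, pa, upper_attr, lower_attr,
				  pt->redirect);
	return tl_4k_map(tl_pte, pa, upper_attr, lower_attr, pt->redirect);
}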
static void __msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, u32 va,
					      u32 len, u32 silent)
{
	u32 offset = 0;
	u64 *fl_pte;
	u64 *sl_pte;
	u64 *tl_pte;
	u32 fl_offset;
	u32 sl_offset;
	u64 *sl_table;
	u64 *tl_table;
	u32 sl_start, sl_end;
	u32 tl_start, tl_end;
	u32 redirect = pt->redirect;

	BUG_ON(len & (SZ_4K - 1));

	while (offset < len) {
		u32 entries;
		u32 check;
		u32 left_to_unmap = len - offset;
		u32 type;

		fl_offset = FL_OFFSET(va);
		fl_pte = pt->fl_table + fl_offset;

		if (*fl_pte == 0) {
			if (!silent)
				pr_err("First level PTE is 0 at index 0x%x (offset: 0x%x)\n",
				       fl_offset, offset);
			return;
		}
		type = *fl_pte & FLSL_PTE_TYPE_MASK;

		if (type == FLSL_TYPE_BLOCK) {
			fl_1G_unmap(fl_pte, redirect);
			va += SZ_1G;
			offset += SZ_1G;
		} else if (type == FLSL_TYPE_TABLE) {
			sl_table = FOLLOW_TO_NEXT_TABLE(fl_pte);
			sl_offset = SL_OFFSET(va);
			sl_pte = sl_table + sl_offset;
			type = *sl_pte & FLSL_PTE_TYPE_MASK;

			if (type == FLSL_TYPE_BLOCK) {
				sl_start = sl_offset;
				sl_end = (left_to_unmap / SZ_2M) + sl_start;

				if (sl_end > NUM_SL_PTE)
					sl_end = NUM_SL_PTE;

				entries = sl_end - sl_start;

				memset(sl_table + sl_start, 0,
				       entries * sizeof(*sl_pte));

				clean_pte(sl_table + sl_start,
					  sl_table + sl_end, redirect);

				/* If we just unmapped the whole table, don't
				 * bother seeing if there are still used
				 * entries left.
				 */
				check = ((sl_end - sl_start) != NUM_SL_PTE);

				free_table(fl_pte, sl_table, NUM_SL_PTE,
					   redirect, check);

				offset += entries * SZ_2M;
				va += entries * SZ_2M;
			} else if (type == FLSL_TYPE_TABLE) {
				u32 tbl_freed;

				tl_start = TL_OFFSET(va);
				tl_table = FOLLOW_TO_NEXT_TABLE(sl_pte);
				tl_end = (left_to_unmap / SZ_4K) + tl_start;

				if (tl_end > NUM_TL_PTE)
					tl_end = NUM_TL_PTE;

				entries = tl_end - tl_start;

				memset(tl_table + tl_start, 0,
				       entries * sizeof(*tl_pte));

				clean_pte(tl_table + tl_start,
					  tl_table + tl_end, redirect);

				/* If we just unmapped the whole table, don't
				 * bother seeing if there are still used
				 * entries left.
				 */
				check = entries != NUM_TL_PTE;

				tbl_freed = free_table(sl_pte, tl_table,
						       NUM_TL_PTE, redirect,
						       check);
				if (tbl_freed)
					free_table(fl_pte, sl_table,
						   NUM_SL_PTE, redirect, 1);

				offset += entries * SZ_4K;
				va += entries * SZ_4K;
			} else {
				if (!silent)
					pr_err("Second level PTE (0x%llx) is invalid at index 0x%x (offset: 0x%x)\n",
					       *sl_pte, sl_offset, offset);
			}
		} else {
			if (!silent)
				pr_err("First level PTE (0x%llx) is invalid at index 0x%x (offset: 0x%x)\n",
				       *fl_pte, fl_offset, offset);
		}
	}
}
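/*
 * A minimal sketch of the public wrappers one would expect around the
 * helper above: a verbose unmap for normal use and a silent one for
 * error-path cleanup, where a half-built range legitimately contains
 * empty PTEs. The signatures are assumptions, not copied from the
 * original file.
 */
void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, u32 va, u32 len)
{
	__msm_iommu_pagetable_unmap_range(pt, va, len, 0);
}

static void msm_iommu_pagetable_unmap_range_silent(struct msm_iommu_pt *pt,
						   u32 va, u32 len)
{
	__msm_iommu_pagetable_unmap_range(pt, va, len, 1);
}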
static void fl_1G_unmap(u64 *fl_pte, s32 redirect)
{
	*fl_pte = 0;
	clean_pte(fl_pte, fl_pte + 1, redirect);
}