_setup_initial_ttbl(virtual_addr_t load_start,
		    virtual_addr_t load_end,
		    virtual_addr_t exec_start,
		    virtual_addr_t exec_end)
{
	/* Build the initial hypervisor stage-1 LPAE translation tables.
	 *
	 * Two ranges are mapped with 4K level3 pages:
	 *   [load_start, load_end)  identity mapped (va == pa), used at boot
	 *   [exec_start, exec_end)  mapped to the load-time physical range
	 *
	 * Tables are carved out of the statically allocated def_ttbl pool
	 * and def_ttbl_tree records each allocated table's parent index.
	 * This runs with the MMU off, so all pointers used here are
	 * load-time physical addresses.
	 */
	int *ttbl_tree;
	u32 i, index, map_exec;
	u32 ttbl_count;
	u64 *ttbl, *nttbl;
	virtual_addr_t ttbl_base, page_addr;
	physical_addr_t pa;

	/* Initialize HMAIR0 and HMAIR1 so pages can select caching
	 * attributes via the AttrIndx field of their descriptors.
	 * Fix: HMAIR1 was previously (incorrectly) written with
	 * HMAIR0_INITVAL.
	 */
	write_hmair0(HMAIR0_INITVAL);
	write_hmair1(HMAIR1_INITVAL);

	/* Reset the table pool and the parent-tracking tree */
	ttbl = NULL;
	ttbl_base = to_load_pa((virtual_addr_t)&def_ttbl);
	nttbl = (u64 *)ttbl_base;
	ttbl_tree = (int *)to_load_pa((virtual_addr_t)&def_ttbl_tree);
	for (i = 0; i < TTBL_INITIAL_TABLE_COUNT; i++) {
		ttbl_tree[i] = -1; /* -1 == slot unused */
	}
	ttbl_count = 0;

	/* Allocate and zero the level1 (top) table */
	if (ttbl_count == TTBL_INITIAL_TABLE_COUNT) {
		while (1); /* No initial table available; hang (no console yet) */
	}
	for (i = 0; i < TTBL_TABLE_ENTCNT; i++) {
		nttbl[i] = 0x0ULL;
	}
	ttbl_count++;
	ttbl = nttbl;
	nttbl += TTBL_TABLE_ENTCNT;

	/* Walk both ranges one page at a time: first the identity (load)
	 * range, then the exec range.
	 */
	map_exec = 0;
	page_addr = load_start;
	while (1) {
		if (!map_exec && load_end <= page_addr) {
			/* Finished the load range; switch to the exec range */
			map_exec = 1;
			page_addr = exec_start;
		} else if (map_exec && exec_end <= page_addr) {
			break;
		}

		/* Descend from the level1 table */
		ttbl = (u64 *)to_load_pa((virtual_addr_t)&def_ttbl);
		index = (page_addr & TTBL_L1_INDEX_MASK) >> TTBL_L1_INDEX_SHIFT;
		if (ttbl[index] & TTBL_VALID_MASK) {
			/* Follow the existing level2 table */
			ttbl = (u64 *)(u32)(ttbl[index] & TTBL_OUTADDR_MASK);
		} else {
			/* Allocate, zero, and link a new level2 table */
			if (ttbl_count == TTBL_INITIAL_TABLE_COUNT) {
				while (1); /* No initial table available */
			}
			for (i = 0; i < TTBL_TABLE_ENTCNT; i++) {
				nttbl[i] = 0x0ULL;
			}
			/* Record the parent table index for the new table */
			ttbl_tree[ttbl_count] = ((u32)ttbl - ttbl_base) >> 
							TTBL_TABLE_SIZE_SHIFT;
			ttbl_count++;
			ttbl[index] |= (((virtual_addr_t)nttbl) & TTBL_OUTADDR_MASK);
			ttbl[index] |= (TTBL_TABLE_MASK | TTBL_VALID_MASK);
			ttbl = nttbl;
			nttbl += TTBL_TABLE_ENTCNT;
		}

		/* Descend from the level2 table */
		index = (page_addr & TTBL_L2_INDEX_MASK) >> TTBL_L2_INDEX_SHIFT;
		if (ttbl[index] & TTBL_VALID_MASK) {
			/* Follow the existing level3 table */
			ttbl = (u64 *)(u32)(ttbl[index] & TTBL_OUTADDR_MASK);
		} else {
			/* Allocate, zero, and link a new level3 table */
			if (ttbl_count == TTBL_INITIAL_TABLE_COUNT) {
				while (1); /* No initial table available */
			}
			for (i = 0; i < TTBL_TABLE_ENTCNT; i++) {
				nttbl[i] = 0x0ULL;
			}
			/* Record the parent table index for the new table */
			ttbl_tree[ttbl_count] = ((u32)ttbl - ttbl_base) >> 
							TTBL_TABLE_SIZE_SHIFT;
			ttbl_count++;
			ttbl[index] |= (((virtual_addr_t)nttbl) & TTBL_OUTADDR_MASK);
			ttbl[index] |= (TTBL_TABLE_MASK | TTBL_VALID_MASK);
			ttbl = nttbl;
			nttbl += TTBL_TABLE_ENTCNT;
		}

		/* Install the level3 page entry (skip if already mapped) */
		index = (page_addr & TTBL_L3_INDEX_MASK) >> TTBL_L3_INDEX_SHIFT;
		if (!(ttbl[index] & TTBL_VALID_MASK)) {
			if (map_exec) {
				/* exec va -> load-time pa */
				ttbl[index] |= (to_load_pa(page_addr) & TTBL_OUTADDR_MASK);
			} else {
				/* identity: va == pa */
				ttbl[index] |= (page_addr & TTBL_OUTADDR_MASK);
			}
			/* Access flag, SRW permissions, write-through
			 * normal memory attribute index
			 */
			ttbl[index] |= TTBL_STAGE1_LOWER_AF_MASK;
			ttbl[index] |= (TTBL_AP_SRW_U << TTBL_STAGE1_LOWER_AP_SHIFT);
			ttbl[index] |= (AINDEX_NORMAL_WT << TTBL_STAGE1_LOWER_AINDEX_SHIFT) & 
							TTBL_STAGE1_LOWER_AINDEX_MASK;
			ttbl[index] |= (TTBL_TABLE_MASK | TTBL_VALID_MASK);
		}

		/* Point to next page */
		page_addr += TTBL_L3_BLOCK_SIZE;
	}

	/* Setup Hypervisor Translation Control Register */
	i = read_htcr();
	i &= ~HTCR_T0SZ_MASK; /* T0SZ = 0 => translate the full input range */
	write_htcr(i);

	/* Setup Hypervisor Translation Table Base Register
	 * Note: MMU is still disabled here, so va == pa
	 */
	pa = to_load_pa((virtual_addr_t)&def_ttbl); /* fixed stray ';;' */
	pa &= HTTBR_BADDR_MASK;
	write_httbr(pa);

	/* Setup Hypervisor Virtual Translation Control Register
	 * (SL0 = 1 for stage-2 walks)
	 */
	i = read_vtcr();
	i |= (0x1 << VTCR_SL0_SHIFT) & VTCR_SL0_MASK;
	write_vtcr(i);
}
    _setup_initial_ttbl(virtual_addr_t load_start, virtual_addr_t load_end,
			virtual_addr_t exec_start, virtual_addr_t exec_end)
{
	u32 idx;
#ifdef CONFIG_DEFTERM_EARLY_PRINT
	virtual_addr_t defterm_early_va;
#endif
	struct mmu_lpae_entry_ctrl lpae_entry = { 0, NULL, NULL, 0 };

	/* Locate the pre-allocated table tree at its load-time physical
	 * address and mark all of its slots as unused (-1).
	 */
	lpae_entry.ttbl_tree =
		(int *)to_load_pa((virtual_addr_t)&def_ttbl_tree);
	for (idx = 0; idx < TTBL_INITIAL_TABLE_COUNT; idx++) {
		lpae_entry.ttbl_tree[idx] = -1;
	}

	/* Point table allocation at the start of the pre-allocated
	 * translation table pool.
	 */
	lpae_entry.ttbl_base = to_load_pa((virtual_addr_t)&def_ttbl);
	lpae_entry.next_ttbl = (u64 *)lpae_entry.ttbl_base;

	/* Zero out and reserve the first translation table */
	for (idx = 0; idx < TTBL_TABLE_ENTCNT; idx++) {
		lpae_entry.next_ttbl[idx] = 0x0ULL;
	}
	lpae_entry.ttbl_count++;
	lpae_entry.next_ttbl += TTBL_TABLE_ENTCNT;

#ifdef CONFIG_DEFTERM_EARLY_PRINT
	/* Map a single page for the early defterm UART
	 * Note: debug aid for early boot only
	 */
	defterm_early_va = to_exec_va((virtual_addr_t)&defterm_early_base);
	__setup_initial_ttbl(&lpae_entry,
			     defterm_early_va,
			     defterm_early_va + TTBL_L3_BLOCK_SIZE,
			     (virtual_addr_t)CONFIG_DEFTERM_EARLY_BASE_PA,
			     AINDEX_SO, TRUE);
#endif

	/* Identity mapping (physical == logical)
	 * Note: only required while booting
	 */
	__setup_initial_ttbl(&lpae_entry, load_start, load_end, load_start,
			     AINDEX_NORMAL_WB, TRUE);

	/* Runtime mappings for the logical addresses covered by the
	 * read-only linker sections
	 */
	SETUP_RO_SECTION(lpae_entry, text);
	SETUP_RO_SECTION(lpae_entry, init);
	SETUP_RO_SECTION(lpae_entry, cpuinit);
	SETUP_RO_SECTION(lpae_entry, spinlock);
	SETUP_RO_SECTION(lpae_entry, rodata);

	/* Runtime mappings for the remaining logical addresses, i.e.
	 * those not covered by the read-only linker sections above
	 */
	__setup_initial_ttbl(&lpae_entry, exec_start, exec_end, load_start,
			     AINDEX_NORMAL_WB, TRUE);
}