/*
 * Determine the table level closest to the initial lookup level that
 * can describe this translation. Then, align base VA to the next block
 * at the determined level.
 */
static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	/*
	 * By OR'ing the base PA and the size, the alignment of the result is
	 * the smaller of their two alignments: the lowest bit set in either
	 * value determines the common boundary.
	 *
	 * There are three different cases. For example (for 4 KiB page size):
	 *
	 * +--------------+------------------++--------------+
	 * | PA alignment | Size multiple of || VA alignment |
	 * +--------------+------------------++--------------+
	 * |     2 MiB    |       2 MiB      ||     2 MiB    | (1)
	 * |     2 MiB    |       4 KiB      ||     4 KiB    | (2)
	 * |     4 KiB    |       2 MiB      ||     4 KiB    | (3)
	 * +--------------+------------------++--------------+
	 *
	 * - In (1), it is possible to take advantage of the alignment of the PA
	 *   and the size of the region to use a level 2 translation table
	 *   instead of a level 3 one.
	 *
	 * - In (2), the size is smaller than a level 2 block entry, so a
	 *   level 3 table is needed to describe the region; otherwise the
	 *   library would map more memory than requested.
	 *
	 * - In (3), even though the region has the size of one level 2 block
	 *   entry, it isn't possible to describe the translation with a level 2
	 *   block entry because of the alignment of the base PA.
	 *
	 *   Only bits 47:21 of a level 2 block descriptor are used by the MMU,
	 *   bits 20:0 of the resulting address are 0 in this case. Because of
	 * this, the PA generated as a result of this translation is aligned to
	 *   2 MiB. The PA that was requested to be mapped is aligned to 4 KiB,
	 *   though, which means that the resulting translation is incorrect.
	 *   The only way to prevent this is by using a finer granularity.
	 */
	unsigned long long align_check;

	align_check = mm->base_pa | (unsigned long long)mm->size;
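
	/*
	 * Worked example with assumed values (4 KiB granule): base_pa =
	 * 0x40200000 (2 MiB aligned) and size = 0x1000 give align_check =
	 * 0x40201000. Bit 12 is set, so the level 2 mask check below fails
	 * and the loop falls through without rounding: the base VA keeps the
	 * PAGE_SIZE alignment already enforced by mmap_add_region.
	 */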

	/*
	 * Assume it is always aligned to level 3. There's no need to check that
	 * level because its block size is PAGE_SIZE. The checks to verify that
	 * the addresses and size are aligned to PAGE_SIZE are inside
	 * mmap_add_region.
	 */
	for (unsigned int level = ctx->base_level; level <= 2U; ++level) {

		if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
			continue;

		mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
		return;
	}
}
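
/*
 * Illustrative sketch, not part of the library: the same level selection
 * expressed as a pure function returning the chosen level. It assumes a
 * 4 KiB granule and reuses the XLAT_BLOCK_MASK() macro; the helper name is
 * hypothetical and the code is kept out of the build on purpose.
 */
#if 0
static unsigned int coarsest_level_for(unsigned long long base_pa,
				       size_t size, unsigned int base_level)
{
	unsigned long long align_check = base_pa | (unsigned long long)size;

	for (unsigned int level = base_level; level <= 2U; ++level) {
		/* A clear mask means block entries are usable at 'level'. */
		if ((align_check & XLAT_BLOCK_MASK(level)) == 0U)
			return level;
	}

	/* Fall back to level 3 pages; PAGE_SIZE alignment is guaranteed. */
	return 3U;
}
#endif
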
/*
 * Returns a block/page table descriptor for the given level and attributes.
 */
uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
		   unsigned long long addr_pa, int level)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * Level 3 entries use page descriptors; all other levels use block
	 * descriptors.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	/*
	 * Do not allow unprivileged access when the mapping is for a privileged
	 * EL. For translation regimes that have no mappings for lower exception
	 * levels (such as EL3), set AP[1] to AP_NO_ACCESS_UNPRIVILEGED.
	 */
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		if (attr & MT_USER) {
			/* EL0 mapping requested, so we give User access */
			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
		} else {
			/* EL1 mapping requested, no User access granted */
			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
		}
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
	}

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these two
	 * types of memory, it is not strictly necessary to set the
	 * shareability field in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which causes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.  The actual
		 * XN bit(s) to set in the descriptor depends on the context's
		 * translation regime and the policy applied in
		 * xlat_arch_regime_get_xn_desc().
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
		}

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
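
/*
 * Usage sketch, illustrative only: building a descriptor for a secure,
 * read-only, executable page of normal memory. The context, PA and wrapper
 * name are assumptions; the MT_* flags are the library's real attributes.
 * Kept out of the build on purpose.
 */
#if 0
static uint64_t example_ro_exec_page_desc(const xlat_ctx_t *ctx)
{
	/*
	 * Yields PAGE_DESC with the access flag, AP_RO, the inner write-back
	 * cacheable attribute index and inner-shareable bits, and with XN
	 * left clear so the page remains executable.
	 */
	return xlat_desc(ctx, MT_MEMORY | MT_RO | MT_SECURE, 0x40001000ULL, 3);
}
#endif
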
/*
 * Decides which action to take when mapping the specified region at the
 * given table entry, based on the entry's current descriptor type and the
 * table level.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		unsigned int desc_type, unsigned long long dest_pa,
		uintptr_t table_entry_base_va, unsigned int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3U) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check whether the destination PA's alignment
				 * and the region's mapping granularity allow a
				 * block descriptor, or whether a finer table
				 * is needed.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
				    || (level < MIN_LVL_BLOCK_DESC) ||
				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) &&
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 *
		 * There cannot be partial block overlaps at level 3. If that
		 * happens, the preliminary checks performed when adding the
		 * mmap region failed to enforce that the PA, VA and size are
		 * at least aligned to PAGE_SIZE.
		 */
		assert(level < 3U);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	} else {

		/*
		 * This table entry is outside of the region specified in the
		 * arguments, don't write anything to it.
		 */
		return ACTION_NONE;
	}
}
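
/*
 * Decision sketch with assumed values (4 KiB granule): mapping a region with
 * base_va = 0x40200000 and size = 2 MiB onto an invalid level 2 entry whose
 * base VA is 0x40200000 covers the entry exactly, so the function returns
 * ACTION_WRITE_BLOCK_ENTRY, provided dest_pa is also 2 MiB aligned and
 * mm->granularity allows a 2 MiB block. A dest_pa aligned only to 4 KiB
 * forces ACTION_CREATE_NEW_TABLE instead, matching case (3) described in
 * mmap_alloc_va_align_ctx().
 */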