void bl1_plat_set_ep_info(unsigned int image_id,
		entry_point_info_t *ep_info)
{
	unsigned int data = 0;
	uintptr_t tmp = HIKEY960_NS_TMP_OFFSET;

	if (image_id != NS_BL1U_IMAGE_ID)
		panic();
	/* Copy NS BL1U from 0x1AC1_8000 to 0x1AC9_8000 */
	memcpy((void *)tmp, (void *)HIKEY960_NS_IMAGE_OFFSET,
		NS_BL1U_SIZE);
	memcpy((void *)NS_BL1U_BASE, (void *)tmp, NS_BL1U_SIZE);
	inv_dcache_range(NS_BL1U_BASE, NS_BL1U_SIZE);
	/* Initialize the GIC driver, cpu and distributor interfaces */
	gicv2_driver_init(&hikey960_gic_data);
	gicv2_distif_init();
	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();
	/* CNTFRQ is read-only in EL1 */
	write_cntfrq_el0(plat_get_syscnt_freq2());
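	/* CPACR_EL1.FPEN (bits [21:20]) = 0b11: do not trap FP/SIMD at EL1/EL0 */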
	data = read_cpacr_el1();
	do {
		data |= 3 << 20;
		write_cpacr_el1(data);
		data = read_cpacr_el1();
	} while ((data & (3 << 20)) != (3 << 20));
	INFO("cpacr_el1:0x%x\n", data);

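	/* Pass the core's MPIDR affinity fields (Aff1/Aff0) to NS BL1U in arg0 */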
	ep_info->args.arg0 = 0xffff & read_mpidr();
	ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS);
}
static size_t uniphier_emmc_read(int lba, uintptr_t buf, size_t size)
{
	uintptr_t host_base = 0x5a000200;
	int ret;

	inv_dcache_range(buf, size);

	if (!uniphier_emmc_block_addressing)
		lba *= 512;

	ret = uniphier_emmc_load_image(host_base, lba, buf, size / 512);

	inv_dcache_range(buf, size);

	return ret ? 0 : size;
}
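/*
 * The two invalidations above bracket the controller's DMA: the first drops
 * any dirty lines that could be evicted on top of the incoming data, the
 * second drops lines speculatively filled while the transfer was in flight.
 * A hedged sketch of how a block-read callback of this shape is typically
 * registered with the generic io_block layer; the buffer window addresses
 * and the device-spec name are illustrative, not taken from the driver.
 */
#define EMMC_BLOCK_BUF_BASE	0x81000000ULL	/* illustrative */
#define EMMC_BLOCK_BUF_SIZE	0x00100000ULL	/* illustrative */

static struct io_block_dev_spec emmc_dev_spec = {
	.buffer = {
		.offset = EMMC_BLOCK_BUF_BASE,
		.length = EMMC_BLOCK_BUF_SIZE,
	},
	.ops = {
		.read = uniphier_emmc_read,
	},
	.block_size = 512,
};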
/*
 * This function retrieves the `ts` value from the storage identified by
 * `base_addr`, `tid` and `cpuid`.
 * Note: The timestamp addresses are cache line aligned per cpu.
 */
unsigned long long __pmf_get_timestamp(uintptr_t base_addr,
				       unsigned int tid,
				       unsigned int cpuid,
				       unsigned int flags)
{
	assert(cpuid < PLATFORM_CORE_COUNT);
	unsigned long long *ts_addr = (unsigned long long *)calc_ts_addr(base_addr,
								tid, cpuid);

	if (flags & PMF_CACHE_MAINT)
		inv_dcache_range((uintptr_t)ts_addr, sizeof(unsigned long long));

	return *ts_addr;
}
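/*
 * A hedged usage sketch: one core reading back the timestamps captured by
 * every core for a given TID. Passing PMF_CACHE_MAINT makes the helper
 * invalidate each per-cpu, cache-line-aligned slot before the read, so the
 * reader does not see a stale local copy. The base address and TID are
 * placeholders for values normally provided by the PMF service plumbing.
 */
static void dump_timestamps(uintptr_t svc_ts_base, unsigned int tid)
{
	unsigned int cpu;

	for (cpu = 0U; cpu < PLATFORM_CORE_COUNT; cpu++) {
		unsigned long long ts;

		ts = __pmf_get_timestamp(svc_ts_base, tid, cpu,
					 PMF_CACHE_MAINT);
		INFO("cpu%u tid%u: %llu\n", cpu, tid, ts);
	}
}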
/*******************************************************************************
 * Generic function to load and authenticate an image. The image is actually
 * loaded by calling the 'load_image()' function. In addition, this function
 * uses recursion to authenticate the parent images up to the root of trust.
 ******************************************************************************/
int load_auth_image(meminfo_t *mem_layout,
		    unsigned int image_id,
		    uintptr_t image_base,
		    image_info_t *image_data,
		    entry_point_info_t *entry_point_info)
{
	int rc;

#if TRUSTED_BOARD_BOOT
	unsigned int parent_id;

	/* Use recursion to authenticate parent images */
	rc = auth_mod_get_parent_id(image_id, &parent_id);
	if (rc == 0) {
		rc = load_auth_image(mem_layout, parent_id, image_base,
				     image_data, NULL);
		if (rc != LOAD_SUCCESS) {
			return rc;
		}
	}
#endif /* TRUSTED_BOARD_BOOT */

	/* Load the image */
	rc = load_image(mem_layout, image_id, image_base, image_data,
			entry_point_info);
	if (rc != IO_SUCCESS) {
		return LOAD_ERR;
	}

#if TRUSTED_BOARD_BOOT
	/* Authenticate it */
	rc = auth_mod_verify_img(image_id,
				 (void *)image_data->image_base,
				 image_data->image_size);
	if (rc != 0) {
		memset((void *)image_data->image_base, 0x00,
		       image_data->image_size);
		flush_dcache_range(image_data->image_base,
				   image_data->image_size);
		return LOAD_AUTH_ERR;
	}

	/* After working with data, invalidate the data cache */
	inv_dcache_range(image_data->image_base,
			(size_t)image_data->image_size);
#endif /* TRUSTED_BOARD_BOOT */

	return LOAD_SUCCESS;
}
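/*
 * A hedged sketch of a typical call site from this era of the API, loosely
 * modelled on a BL2 stage loading BL31. The memory-layout pointer, image ID,
 * base address and error handling are illustrative.
 */
static void bl2_load_bl31_sketch(meminfo_t *bl2_tzram_layout)
{
	image_info_t bl31_image_info;
	entry_point_info_t bl31_ep_info;
	int err;

	SET_PARAM_HEAD(&bl31_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
	SET_PARAM_HEAD(&bl31_ep_info, PARAM_EP, VERSION_1, EP_SECURE);

	err = load_auth_image(bl2_tzram_layout, BL31_IMAGE_ID, BL31_BASE,
			      &bl31_image_info, &bl31_ep_info);
	if (err != LOAD_SUCCESS) {
		ERROR("Failed to load BL31 (%i)\n", err);
		plat_error_handler(err);
	}
}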
static uint32_t scp_boot_message_wait(size_t size)
{
	uint32_t mhu_status;

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCP Boot Protocol message, reject any other protocol */
	if (mhu_status != (1 << BOM_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/* Make sure we see the reply from the SCP and not any stale data */
	if (MHU_PAYLOAD_CACHED)
		inv_dcache_range(BOM_SHARED_MEM, size);

	return *(uint32_t *) BOM_SHARED_MEM;
}
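/*
 * For context, a hedged sketch of where this wait sits in the BOM handshake,
 * assuming the companion helpers from the same driver
 * (scp_boot_message_start/send/end); the payload handling is illustrative.
 */
static uint32_t scp_boot_exchange_sketch(size_t payload_size)
{
	uint32_t response;

	scp_boot_message_start();
	/* ... place the command and payload in BOM_SHARED_MEM ... */
	scp_boot_message_send(payload_size);

	response = scp_boot_message_wait(sizeof(response));
	scp_boot_message_end();

	return response;
}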
/*
 * Write to the field in the structure corresponding to `structure_id`.
 * `fld_off` is the offset to the field in the structure and `mode`
 * indicates whether cache maintenance needs to be performed for the write.
 * `data` is a pointer to the data and `size` specifies its size in bytes.
 * Returns SDS_OK on success or corresponding error codes on failure.
 */
int sds_struct_write(uint32_t structure_id, unsigned int fld_off,
		void *data, size_t size, sds_access_mode_t mode)
{
	int status;
	uintptr_t field_base;
	struct_header_t *header = NULL;

	if (!data)
		return SDS_ERR_INVALID_PARAMS;

	/* Check if a structure with this ID exists */
	status = get_struct_header(structure_id, &header);
	if (status != SDS_OK)
		return status;

	assert(header);

	if (mode == SDS_ACCESS_MODE_CACHED)
		inv_dcache_range((uintptr_t)header, SDS_HEADER_SIZE + size);

	if (!IS_SDS_HEADER_VALID(header)) {
		WARN("SDS: Writing to un-finalized structure 0x%x\n",
				structure_id);
		return SDS_ERR_STRUCT_NOT_FINALIZED;
	}

	if ((fld_off + size) > GET_SDS_HEADER_STRUCT_SIZE(header))
		return SDS_ERR_FAIL;

	field_base = (uintptr_t)header + SDS_HEADER_SIZE + fld_off;
	if (check_uptr_overflow(field_base, size - 1))
		return SDS_ERR_FAIL;

	/* Copy the required field in the struct */
	memcpy((void *)field_base, data, size);

	if (mode == SDS_ACCESS_MODE_CACHED)
		flush_dcache_range((uintptr_t)field_base, size);

	return SDS_OK;
}
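/*
 * A hedged usage sketch: publishing a 64-bit value into an SDS field with
 * cached access, so the header check invalidates before the read and the
 * payload is flushed after the copy. The structure ID and field offset are
 * placeholders for values defined by the platform's SDS layout.
 */
#define MY_SDS_STRUCT_ID	0x42U	/* placeholder structure ID */
#define MY_SDS_FIELD_OFFSET	0x8U	/* placeholder field offset */

static void sds_publish_entrypoint_sketch(uint64_t warm_entry)
{
	int ret;

	ret = sds_struct_write(MY_SDS_STRUCT_ID, MY_SDS_FIELD_OFFSET,
			       &warm_entry, sizeof(warm_entry),
			       SDS_ACCESS_MODE_CACHED);
	if (ret != SDS_OK)
		ERROR("SDS: write failed (%d)\n", ret);
}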
static int stm32_sdmmc2_read(int lba, uintptr_t buf, size_t size)
{
	uint32_t error_flags = SDMMC_STAR_RXOVERR | SDMMC_STAR_DCRCFAIL |
			       SDMMC_STAR_DTIMEOUT;
	uint32_t flags = error_flags | SDMMC_STAR_DATAEND;
	uint32_t status;
	uint32_t *buffer;
	uintptr_t base = sdmmc2_params.reg_base;
	uintptr_t fifo_reg = base + SDMMC_FIFOR;
	unsigned int start;
	int ret;

	/* Assert buf is 4-byte aligned */
	assert((buf & GENMASK(1, 0)) == 0U);

	buffer = (uint32_t *)buf;

	if (sdmmc2_params.use_dma) {
		inv_dcache_range(buf, size);

		return 0;
	}

	if (size <= MMC_BLOCK_SIZE) {
		flags |= SDMMC_STAR_DBCKEND;
	}

	start = get_timer(0);

	do {
		status = mmio_read_32(base + SDMMC_STAR);

		if ((status & error_flags) != 0U) {
			ERROR("%s: Read error (status = %x)\n", __func__,
			      status);
			mmio_write_32(base + SDMMC_DCTRLR,
				      SDMMC_DCTRLR_FIFORST);

			mmio_write_32(base + SDMMC_ICR,
				      SDMMC_STATIC_FLAGS);

			ret = stm32_sdmmc2_stop_transfer();
			if (ret != 0) {
				return ret;
			}

			return -EIO;
		}

		if (get_timer(start) > TIMEOUT_1_S) {
			ERROR("%s: timeout 1s (status = %x)\n",
			      __func__, status);
			mmio_write_32(base + SDMMC_ICR,
				      SDMMC_STATIC_FLAGS);

			ret = stm32_sdmmc2_stop_transfer();
			if (ret != 0) {
				return ret;
			}

			return -ETIMEDOUT;
		}

		if (size < (8U * sizeof(uint32_t))) {
			if ((mmio_read_32(base + SDMMC_DCNTR) > 0U) &&
			    ((status & SDMMC_STAR_RXFIFOE) == 0U)) {
				*buffer = mmio_read_32(fifo_reg);
				buffer++;
			}
		} else if ((status & SDMMC_STAR_RXFIFOHF) != 0U) {
			uint32_t count;

			/* Read data from SDMMC Rx FIFO */
			for (count = 0; count < 8U; count++) {
				*buffer = mmio_read_32(fifo_reg);
				buffer++;
			}
		}
	} while ((status & flags) == 0U);

	mmio_write_32(base + SDMMC_ICR, SDMMC_STATIC_FLAGS);

	if ((status & SDMMC_STAR_DPSMACT) != 0U) {
		WARN("%s: DPSMACT=1, send stop\n", __func__);
		return stm32_sdmmc2_stop_transfer();
	}

	return 0;
}
static int stm32_sdmmc2_prepare(int lba, uintptr_t buf, size_t size)
{
	struct mmc_cmd cmd;
	int ret;
	uintptr_t base = sdmmc2_params.reg_base;
	uint32_t data_ctrl = SDMMC_DCTRLR_DTDIR;

	if (size == 8U) {
		data_ctrl |= SDMMC_DBLOCKSIZE_8;
	} else {
		data_ctrl |= SDMMC_DBLOCKSIZE_512;
	}

	sdmmc2_params.use_dma = plat_sdmmc2_use_dma(base, buf);

	if (sdmmc2_params.use_dma) {
		inv_dcache_range(buf, size);
	}

	/* Prepare CMD16 */
	mmio_write_32(base + SDMMC_DTIMER, 0);

	mmio_write_32(base + SDMMC_DLENR, 0);

	mmio_write_32(base + SDMMC_DCTRLR, 0);

	zeromem(&cmd, sizeof(struct mmc_cmd));

	cmd.cmd_idx = MMC_CMD(16);
	if (size > MMC_BLOCK_SIZE) {
		cmd.cmd_arg = MMC_BLOCK_SIZE;
	} else {
		cmd.cmd_arg = size;
	}

	cmd.resp_type = MMC_RESPONSE_R1;

	ret = stm32_sdmmc2_send_cmd(&cmd);
	if (ret != 0) {
		ERROR("CMD16 failed\n");
		return ret;
	}

	/* Prepare data command */
	mmio_write_32(base + SDMMC_DTIMER, UINT32_MAX);

	mmio_write_32(base + SDMMC_DLENR, size);

	if (sdmmc2_params.use_dma) {
		mmio_write_32(base + SDMMC_IDMACTRLR,
			      SDMMC_IDMACTRLR_IDMAEN);
		mmio_write_32(base + SDMMC_IDMABASE0R, buf);

		flush_dcache_range(buf, size);
	}

	mmio_clrsetbits_32(base + SDMMC_DCTRLR,
			   SDMMC_DCTRLR_CLEAR_MASK,
			   data_ctrl);

	return 0;
}
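/*
 * Both callbacks above are plugged into the generic MMC framework through
 * the driver's ops table; a condensed view, assuming the remaining callbacks
 * are defined elsewhere in the same driver.
 */
static const struct mmc_ops stm32_sdmmc2_ops = {
	.init		= stm32_sdmmc2_init,
	.send_cmd	= stm32_sdmmc2_send_cmd,
	.set_ios	= stm32_sdmmc2_set_ios,
	.prepare	= stm32_sdmmc2_prepare,
	.read		= stm32_sdmmc2_read,
	.write		= stm32_sdmmc2_write,
};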