void
mas_atexit( void )
{
  CTRL_PREPARE;
  /* extern mas_options_t g_opts; */
  extern mas_options_t *gpopts;

  {
    int rn = 0;
    char name_buffer[32] = "?";

    IEVAL( rn, prctl( PR_GET_NAME, ( unsigned long ) name_buffer ) );
    IEVAL( rn, prctl( PR_SET_NAME, ( unsigned long ) "zocMainAtexit" ) );
    HMSG( "AT EXIT %s: logQ: %lu - %lu = %lu", name_buffer, ctrl.log_q_came, ctrl.log_q_gone, ctrl.log_q_came - ctrl.log_q_gone );
  }

  mas_server_destroy( gpopts );
  /* mas_server_destroy( MAS_PASS_OPTS_REF ); */
#ifdef MAS_TRACEMEM
  {
    extern unsigned long memory_balance;

    /* IMSG( "AT EXIT, memory_balance:%ld", memory_balance );                                                         */
    /* IMSG( "AT EXIT, memory_balance:%ld : logQ: %lu - %lu = %lu", memory_balance, ctrl.log_q_came, ctrl.log_q_gone, */
    /*       ctrl.log_q_came - ctrl.log_q_gone );                                                                     */
    /* print_memlist_any( FL, 4, ctrl.stderrfile, ctrl.old_stderrfile, ctrl.msgfile, stderr ); */
    print_memlist_any( FL, 4, ctrl.msgfile, ctrl.stderrfile, ctrl.old_stderrfile, stderr );
    IMSG( "(1)AT EXIT, memory_balance:%ld : logQ: %lu - %lu = %lu", memory_balance, ctrl.log_q_came, ctrl.log_q_gone,
          ctrl.log_q_came - ctrl.log_q_gone );
  }
#else
  IMSG( "AT EXIT" );
#endif
  IMSG( "=====[%u @ %u]=================================================================", getpid(  ), getppid(  ) );
  if ( ctrl.msgfile )
  {
    IMSG( "CLOSE MSG" );
    fclose( ctrl.msgfile );
    ctrl.msgfile = NULL;
  }
  {
    extern unsigned long memory_balance;
    int rn = 0;
    char name_buffer[32] = "?";

    IEVAL( rn, prctl( PR_GET_NAME, ( unsigned long ) name_buffer ) );
    IEVAL( rn, prctl( PR_SET_NAME, ( unsigned long ) "zocMainAtexit" ) );
    /* EMSG( "AT EXIT %s: logQ: %lu - %lu = %lu", name_buffer, ctrl.log_q_came, ctrl.log_q_gone, ctrl.log_q_came - ctrl.log_q_gone ); */
    IMSG( "(2)AT EXIT, memory_balance:%ld : logQ: %lu - %lu = %lu", memory_balance, ctrl.log_q_came, ctrl.log_q_gone,
          ctrl.log_q_came - ctrl.log_q_gone );
  }
  sync(  );
  /* sleep( 3 ); */
  _exit( 0 );
  /* exit_group( 0 ); */
}
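
The handler above renames the calling thread with prctl(PR_SET_NAME), reports its log-queue counters, and finishes with _exit(0), which bypasses any remaining atexit handlers and stdio flushing. Below is a minimal, self-contained sketch of the same pattern built only on standard atexit() and Linux prctl(); the handler name and messages are placeholders, not part of the original code.

#include <stdio.h>
#include <stdlib.h>
#include <sys/prctl.h>

static void demo_atexit(void)
{
    char name[16] = "?";                    /* PR_GET_NAME needs >= 16 bytes */

    if (prctl(PR_GET_NAME, (unsigned long)name) == 0)
        fprintf(stderr, "at exit, thread name was: %s\n", name);
    prctl(PR_SET_NAME, (unsigned long)"demoAtexit");
    fflush(NULL);                           /* flush every open stdio stream */
}

int main(void)
{
    if (atexit(demo_atexit) != 0) {
        perror("atexit");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;                    /* normal exit runs the handler */
}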
Example 2
static TEE_Result platform_banner(void)
{
#ifdef CFG_EMBED_DTB
	IMSG("Platform stm32mp1: flavor %s - DT %s",
		ID2STR(PLATFORM_FLAVOR),
		ID2STR(CFG_EMBED_DTB_SOURCE_FILE));
#else
	IMSG("Platform stm32mp1: flavor %s - no device tree",
		ID2STR(PLATFORM_FLAVOR));
#endif

	return TEE_SUCCESS;
}
Example 3
static TEE_Result update_region(uint32_t param_types, TEE_Param params[4])
{
	uint32_t exp_param_types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
							TEE_PARAM_TYPE_MEMREF_INPUT,
							TEE_PARAM_TYPE_VALUE_INPUT,
							TEE_PARAM_TYPE_NONE);
	uint32_t region_id;
	bool add;
	int dir;
	char *name;
	struct secure_device *device;
	struct region *region;

	if (param_types != exp_param_types) {
		return TEE_ERROR_BAD_PARAMETERS;
	}

	region_id = params[0].value.a;
	add = params[0].value.b;

	name = params[1].memref.buffer;

	dir = params[2].value.a;

	device = platform_find_device_by_name(name);
	if (device == NULL) {
		IMSG("Can't find device %s\n", name);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	region = platform_find_region_by_id(region_id);
	if (region == NULL) {
		IMSG("Can't find region id %d\n", region_id);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (add) {
		if (platform_check_permissions(region, device, dir)) {
			IMSG("check permissions failed\n");
			return TEE_ERROR_BAD_PARAMETERS;
		}

		platform_add_device_to_region(region, device, dir);
	} else {
		platform_remove_device_from_region(region, device);
	}

	return TEE_SUCCESS;
}
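
The early-out check in update_region() works because TEE_PARAM_TYPES() packs four 4-bit parameter types into one word, so a single == comparison validates all four slots. Here is a standalone sketch of that idea with locally defined constants and a packing macro modeled on (but not taken from) the GlobalPlatform headers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins chosen to mirror the GlobalPlatform parameter-type values */
#define PARAM_TYPE_NONE          0
#define PARAM_TYPE_VALUE_INPUT   1
#define PARAM_TYPE_MEMREF_INPUT  5

/* Pack four 4-bit types into one word, like TEE_PARAM_TYPES() */
#define PARAM_TYPES(t0, t1, t2, t3) \
    ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))

int main(void)
{
    uint32_t exp = PARAM_TYPES(PARAM_TYPE_VALUE_INPUT,
                               PARAM_TYPE_MEMREF_INPUT,
                               PARAM_TYPE_VALUE_INPUT,
                               PARAM_TYPE_NONE);

    printf("expected param types word: 0x%04" PRIx32 "\n", exp);   /* 0x0151 */
    return 0;
}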
Example 4
void core_trace_test(void)
{
	INMSG("level: [%d]", _trace_level);
	IMSG("current trace level = %d", _trace_level);
	IMSG("Without args");
	AMSG("[%d] and [%s]", TRACE_ALWAYS, "TRACE_ALWAYS");
	EMSG("[%d] and [%s]", TRACE_ERROR, "TRACE_ERROR");
	IMSG("[%d] and [%s]", TRACE_INFO, "TRACE_INFO");
	DMSG("[%d] and [%s]", TRACE_DEBUG, "TRACE_DEBUG");
	FMSG("[%d] and [%s]", TRACE_FLOW, "TRACE_FLOW");
	AMSG_RAW("Raw trace in TEE CORE with level [%s]", "TRACE_ALWAYS");
	AMSG_RAW(" __ end of raw trace\n");
	DMSG_RAW("Raw trace in TEE CORE with level [%s]", "TRACE_DEBUG");
	DMSG_RAW(" __ end of raw trace\n");
	OUTMSG("");
}
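
core_trace_test() exercises macros that only emit output when the current trace level allows it. A minimal, hypothetical sketch of that level-gating idea, standing in for (not reproducing) the real IMSG/DMSG family and its _trace_level variable:

#include <stdio.h>

/* Hypothetical level values matching the names used above */
#define TRACE_ALWAYS 1
#define TRACE_ERROR  2
#define TRACE_INFO   3
#define TRACE_DEBUG  4
#define TRACE_FLOW   5

static int my_trace_level = TRACE_INFO;

/* Emit the message only when its level is enabled at run time */
#define MY_MSG(level, fmt, ...) \
    do { \
        if ((level) <= my_trace_level) \
            fprintf(stderr, "[%d] " fmt "\n", (level), ##__VA_ARGS__); \
    } while (0)

int main(void)
{
    MY_MSG(TRACE_INFO, "current trace level = %d", my_trace_level);   /* printed    */
    MY_MSG(TRACE_DEBUG, "hidden at level %d", my_trace_level);        /* suppressed */
    return 0;
}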
Example 5
/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
TEE_Result __weak init_teecore(void)
{
	static int is_first = 1;

	/* (DEBUG) for inits at 1st TEE service: when UART is setup */
	if (!is_first)
		return TEE_SUCCESS;
	is_first = 0;

#ifdef CFG_WITH_USER_TA
	tee_svc_uref_base = TEE_TEXT_VA_START;
#endif

	/* init support for future mapping of TAs */
	teecore_init_pub_ram();

	/* time initialization */
	time_source_init();

	/* call pre-defined initcall routines */
	call_initcalls();

	IMSG("Initialized");
	return TEE_SUCCESS;
}
Example 6
static void init_primary_helper(uint32_t pageable_part, uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that IRQ is blocked when using most of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	init_vfp_sec();

	init_runtime(pageable_part);

	IMSG("Initializing (%s)\n", core_v_str);

	thread_init_primary(generic_boot_get_handlers());
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);


	main_init_gic();
	init_vfp_nsec();

	if (init_teecore() != TEE_SUCCESS)
		panic();
	DMSG("Primary CPU switching to normal world boot\n");
}
Example 7
TEE_Result init_teecore(void)
{
	static int is_first = 1;

	/* (DEBUG) for inits at 1st TEE service: when UART is setup */
	if (!is_first)
		return TEE_SUCCESS;
	is_first = 0;

#ifdef CFG_WITH_USER_TA
	tee_svc_uref_base = CFG_TEE_LOAD_ADDR;
#endif

	/* init support for future mapping of TAs */
	tee_mmu_kmap_init();
	teecore_init_pub_ram();

	/* time initialization */
	time_source_init();

	/* call pre-defined initcall routines */
	call_initcalls();

	IMSG("teecore inits done");
	return TEE_SUCCESS;
}
Example 8
TEE_Result init_teecore(void)
{
	static int is_first = 1;
	unsigned long a, s;

	/* (DEBUG) for inits at 1st TEE service: when UART is setup */
	if (!is_first)
		return TEE_SUCCESS;
	is_first = 0;

#ifndef WITH_UART_DRV
	/* UART tracing support */
	asc_init();
	IMSG("teecore: uart trace init");
#endif

	/* core malloc pool init */
#ifdef CFG_TEE_MALLOC_START
	a = CFG_TEE_MALLOC_START;
	s = CFG_TEE_MALLOC_SIZE;
#else
	a = (unsigned long)&teecore_heap_start;
	s = (unsigned long)&teecore_heap_end;
	a = ((a + 1) & ~0x0FFFF) + 0x10000;	/* 64kB aligned */
	s = s & ~0x0FFFF;	/* 64kB aligned */
	s = s - a;
#endif
	IMSG("teecore heap: paddr=0x%lX size=0x%lX (%ldkB)", a, s, s / 1024);
	malloc_init((void *)a, s);

	/* init support for future mapping of TAs */
	tee_mmu_kmap_init();
	teecore_init_ta_ram();
	teecore_init_pub_ram();

	/* Libtomcrypt initialization */
	tee_ltc_init();

	/* time initialization */
	time_source_init();

	IMSG("teecore inits done");
	return TEE_SUCCESS;
}
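
In the #else branch above, the heap window is derived from scatter-file symbols: the start address is rounded up to a 64 kB boundary above teecore_heap_start and the end is rounded down to 64 kB before the size is taken. A small worked example of that arithmetic with hypothetical addresses:

#include <stdio.h>

int main(void)
{
    /* Hypothetical scatter-file symbol addresses */
    unsigned long a = 0x40021234UL;            /* &teecore_heap_start */
    unsigned long s = 0x400a8000UL;            /* &teecore_heap_end   */

    a = ((a + 1) & ~0x0FFFFUL) + 0x10000;      /* -> 0x40030000 (64 kB aligned) */
    s = s & ~0x0FFFFUL;                        /* -> 0x400a0000 (64 kB aligned) */
    s = s - a;                                 /* -> 0x70000 bytes              */

    printf("heap: paddr=0x%lX size=0x%lX (%lukB)\n", a, s, s / 1024);
    /* prints: heap: paddr=0x40030000 size=0x70000 (448kB) */
    return 0;
}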
Example 9
/* teecore heap address/size is defined in scatter file */
extern unsigned char teecore_heap_start;
extern unsigned char teecore_heap_end;

static void main_fiq(void);
static void main_tee_entry_std(struct thread_smc_args *args);
static void main_tee_entry_fast(struct thread_smc_args *args);

static const struct thread_handlers handlers = {
	.std_smc = main_tee_entry_std,
	.fast_smc = main_tee_entry_fast,
	.fiq = main_fiq,
	.svc = tee_svc_handler,
	.abort = tee_pager_abort_handler,
	.cpu_on = pm_panic,
	.cpu_off = pm_panic,
	.cpu_suspend = pm_panic,
	.cpu_resume = pm_panic,
	.system_off = pm_panic,
	.system_reset = pm_panic,
};

void main_init(uint32_t nsec_entry); /* called from assembly only */
void main_init(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;
	size_t pos = get_core_pos();

	/*
	 * Mask IRQ and FIQ before switching to the thread vector as the
	 * thread handler requires IRQ and FIQ to be masked while executing
	 * with the temporary stack. The thread subsystem also asserts that
	 * IRQ is blocked when using most of its functions.
	 */
	thread_mask_exceptions(THREAD_EXCP_FIQ | THREAD_EXCP_IRQ);

	if (pos == 0) {
		thread_init_primary(&handlers);

		/* initialize platform */
		platform_init();
	}

	thread_init_per_cpu();

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;

	if (pos == 0) {
		unsigned long a, s;
		/* core malloc pool init */
#ifdef CFG_TEE_MALLOC_START
		a = CFG_TEE_MALLOC_START;
		s = CFG_TEE_MALLOC_SIZE;
#else
		a = (unsigned long)&teecore_heap_start;
		s = (unsigned long)&teecore_heap_end;
		a = ((a + 1) & ~0x0FFFF) + 0x10000;	/* 64kB aligned */
		s = s & ~0x0FFFF;	/* 64kB aligned */
		s = s - a;
#endif
		malloc_add_pool((void *)a, s);

		teecore_init_ta_ram();

		if (init_teecore() != TEE_SUCCESS) {
			panic();
		}
	}

	IMSG("optee initialize finished\n");
}
Example 10
static TEE_Result test_read(TEE_ObjectHandle object, size_t data_size,
		uint8_t *chunk_buf, size_t chunk_size,
		uint32_t *spent_time_in_ms)
{
	TEE_Time start_time, stop_time;
	size_t remain_bytes = data_size;
	TEE_Result res = TEE_SUCCESS;
	uint32_t read_bytes = 0;

	TEE_GetSystemTime(&start_time);

	while (remain_bytes) {
		size_t read_size;

		DMSG("Read data, remain bytes: %zu", remain_bytes);
		if (remain_bytes < chunk_size)
			read_size = remain_bytes;
		else
			read_size = chunk_size;
		res = TEE_ReadObjectData(object, chunk_buf, read_size,
				&read_bytes);
		if (res != TEE_SUCCESS) {
			EMSG("Failed to read data, res=0x%08x", res);
			goto exit;
		}

		remain_bytes -= read_size;
	}

	TEE_GetSystemTime(&stop_time);

	*spent_time_in_ms = get_delta_time_in_ms(start_time, stop_time);

	IMSG("start: %u.%u(s), stop: %u.%u(s), delta: %u(ms)",
			start_time.seconds, start_time.millis,
			stop_time.seconds, stop_time.millis,
			*spent_time_in_ms);

exit:
	return res;
}
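
test_read() measures elapsed time through get_delta_time_in_ms(), a helper that is not shown in this example. A minimal sketch of what such a helper might look like, assuming the TEE_Time seconds/millis fields used above and that stop is never earlier than start:

#include <tee_internal_api.h>

/* Hypothetical delta helper: elapsed wall-clock time in milliseconds */
static uint32_t delta_time_in_ms(TEE_Time start, TEE_Time stop)
{
	uint32_t ms = (stop.seconds - start.seconds) * 1000;

	ms += stop.millis;	/* add the fractional part of the stop time */
	ms -= start.millis;	/* drop the fraction already elapsed at start */
	return ms;
}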
Example 11
static int pl011_dev_init(struct serial_chip *chip, const void *fdt, int offs,
			  const char *parms)
{
	struct pl011_data *pd = container_of(chip, struct pl011_data, chip);
	vaddr_t vbase;
	paddr_t pbase;
	size_t size;

	if (parms && parms[0])
		IMSG("pl011: device parameters ignored (%s)", parms);

	if (dt_map_dev(fdt, offs, &vbase, &size) < 0)
		return -1;

	if (size != 0x1000) {
		EMSG("pl011: unexpected register size: %zx", size);
		return -1;
	}

	pbase = virt_to_phys((void *)vbase);
	pl011_init(pd, pbase, 0, 0);

	return 0;
}
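
pl011_dev_init() recovers its driver-private pl011_data from the generic serial_chip pointer with container_of(). A self-contained sketch of that idiom, using a commonly seen definition of the macro and hypothetical structures:

#include <stddef.h>
#include <stdio.h>

/* A common way to define container_of(): walk back from a member pointer
 * to the start of the enclosing structure. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct chip { int id; };
struct pl011_like { unsigned long base; struct chip chip; };

int main(void)
{
    struct pl011_like pd = { .base = 0x9000000UL, .chip = { .id = 7 } };
    struct chip *c = &pd.chip;                          /* generic handle */
    struct pl011_like *back = container_of(c, struct pl011_like, chip);

    printf("base=0x%lx id=%d\n", back->base, back->chip.id);  /* 0x9000000, 7 */
    return 0;
}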
Example 12
static TEE_Result test_rewrite(TEE_ObjectHandle object, size_t data_size,
		uint8_t *chunk_buf, size_t chunk_size,
		uint32_t *spent_time_in_ms)
{
	TEE_Time start_time, stop_time;
	size_t remain_bytes = data_size;
	TEE_Result res = TEE_SUCCESS;
	uint32_t read_bytes = 0;

	TEE_GetSystemTime(&start_time);

	while (remain_bytes) {
		size_t write_size;
		int32_t negative_chunk_size;

		if (remain_bytes < chunk_size)
			write_size = remain_bytes;
		else
			write_size = chunk_size;
		negative_chunk_size = -(int32_t)write_size;

		/* Read a chunk */
		res = TEE_ReadObjectData(object, chunk_buf, write_size,
				&read_bytes);
		if (res != TEE_SUCCESS) {
			EMSG("Failed to read data, res=0x%08x", res);
			goto exit;
		}

		if (read_bytes != write_size) {
			EMSG("Partial data read, bytes=%u", read_bytes);
			res = TEE_ERROR_CORRUPT_OBJECT;
			goto exit;
		}

		/* Seek to the position before read */
		res = TEE_SeekObjectData(object, negative_chunk_size,
				TEE_DATA_SEEK_CUR);
		if (res != TEE_SUCCESS) {
			EMSG("Failed to seek to previous offset");
			goto exit;
		}

		/* Write a chunk */
		res = TEE_WriteObjectData(object, chunk_buf, write_size);
		if (res != TEE_SUCCESS) {
			EMSG("Failed to write data, res=0x%08x", res);
			goto exit;
		}

		remain_bytes -= write_size;
	}

	TEE_GetSystemTime(&stop_time);

	*spent_time_in_ms = get_delta_time_in_ms(start_time, stop_time);

	IMSG("start: %u.%u(s), stop: %u.%u(s), delta: %u(ms)",
			start_time.seconds, start_time.millis,
			stop_time.seconds, stop_time.millis,
			*spent_time_in_ms);

exit:
	return res;
}
Example 13
static TEE_Result test_trace(uint32_t param_types, TEE_Param params[4])
{
	IMSG("static TA \"%s\" says \"Hello world !\"", TA_NAME);

	return TEE_SUCCESS;
}
Example 14
/*
 * Called when a session is closed, sess_ctx hold the value that was
 * assigned by TA_OpenSessionEntryPoint().
 */
void TA_CloseSessionEntryPoint(void *sess_ctx)
{
	(void)&sess_ctx; /* Unused parameter */
	IMSG("Goodbye SDP\n");
}
Example 15
LWS_VISIBLE void lwsl_emit_syslog(int level, const char *line)
{
	IMSG("%d: %s\n", level, line);
}
Example 16
/*
 * Check if the /secure-chosen node in the DT contains an stdout-path value
 * for which we have a compatible driver. If so, switch the console to
 * this device.
 */
void configure_console_from_dt(unsigned long phys_fdt)
{
	const struct dt_driver *dt_drv;
	const struct serial_driver *sdrv;
	const struct fdt_property *prop;
	struct serial_chip *dev;
	char *stdout_data;
	const char *uart;
	const char *parms = NULL;
	void *fdt;
	int offs;
	char *p;

	if (!phys_fdt)
		return;
	fdt = phys_to_virt(phys_fdt, MEM_AREA_IO_NSEC);
	if (!fdt)
		panic();

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0)
		return;
	prop = fdt_get_property(fdt, offs, "stdout-path", NULL);
	if (!prop) {
		/*
		 * /secure-chosen node present but no stdout-path property
		 * means we don't want any console output
		 */
		IMSG("Switching off console");
		register_serial_console(NULL);
		return;
	}

	stdout_data = strdup(prop->data);
	if (!stdout_data)
		return;
	p = strchr(stdout_data, ':');
	if (p) {
		*p = '\0';
		parms = p + 1;
	}

	/* stdout-path may refer to an alias */
	uart = fdt_get_alias(fdt, stdout_data);
	if (!uart) {
		/* Not an alias, assume we have a node path */
		uart = stdout_data;
	}
	offs = fdt_path_offset(fdt, uart);
	if (offs < 0)
		goto out;

	dt_drv = dt_find_compatible_driver(fdt, offs);
	if (!dt_drv)
		goto out;

	sdrv = (const struct serial_driver *)dt_drv->driver;
	if (!sdrv)
		goto out;
	dev = sdrv->dev_alloc();
	if (!dev)
		goto out;
	/*
	 * If the console is the same as the early console, dev_init() might
	 * clear pending data. Flush to avoid that.
	 */
	console_flush();
	if (sdrv->dev_init(dev, fdt, offs, parms) < 0) {
		sdrv->dev_free(dev);
		goto out;
	}

	IMSG("Switching console to device: %s", uart);
	register_serial_console(dev);
out:
	free(stdout_data);
}
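
The stdout-path value handled above follows the "<alias-or-path>[:<options>]" convention, so the string is split at the first ':' before the alias is resolved. A standalone sketch of just that parsing step, with a hypothetical property value:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* Hypothetical stdout-path property value */
    char *stdout_data = strdup("serial0:115200n8");
    const char *parms = NULL;
    char *p;

    if (!stdout_data)
        return 1;
    p = strchr(stdout_data, ':');        /* split "<alias-or-path>[:<parms>]" */
    if (p) {
        *p = '\0';
        parms = p + 1;
    }
    printf("uart=%s parms=%s\n", stdout_data, parms ? parms : "(none)");
    /* prints: uart=serial0 parms=115200n8 */
    free(stdout_data);
    return 0;
}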
Example 17
static TEE_Result ta_stroage_benchmark_chunk_access_test(uint32_t nCommandID,
		uint32_t param_types, TEE_Param params[4])
{
	TEE_Result res;
	size_t data_size;
	size_t chunk_size;
	TEE_ObjectHandle object = TEE_HANDLE_NULL;
	uint8_t *chunk_buf;
	uint32_t *spent_time_in_ms = &params[2].value.a;
	bool do_verify;

	ASSERT_PARAM_TYPE(param_types, TEE_PARAM_TYPES(
					TEE_PARAM_TYPE_VALUE_INPUT,
					TEE_PARAM_TYPE_VALUE_INPUT,
					TEE_PARAM_TYPE_VALUE_OUTPUT,
					TEE_PARAM_TYPE_NONE));

	data_size = params[0].value.a;
	chunk_size = params[0].value.b;
	do_verify = params[1].value.a;

	if (data_size == 0)
		data_size = DEFAULT_DATA_SIZE;

	if (chunk_size == 0)
		chunk_size = DEFAULT_CHUNK_SIZE;

	IMSG("command id: %u, test data size: %zd, chunk size: %zd\n",
			nCommandID, data_size, chunk_size);

	chunk_buf = TEE_Malloc(chunk_size, TEE_MALLOC_FILL_ZERO);
	if (!chunk_buf) {
		EMSG("Failed to allocate memory");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto exit;
	}

	fill_buffer(chunk_buf, chunk_size);
	res = prepare_test_file(data_size, chunk_buf, chunk_size);
	if (res != TEE_SUCCESS) {
		EMSG("Failed to create test file, res=0x%08x",
				res);
		goto exit_free_chunk_buf;
	}

	res = TEE_OpenPersistentObject(TEE_STORAGE_PRIVATE,
			filename, sizeof(filename),
			TEE_DATA_FLAG_ACCESS_READ |
			TEE_DATA_FLAG_ACCESS_WRITE |
			TEE_DATA_FLAG_ACCESS_WRITE_META |
			TEE_DATA_FLAG_OVERWRITE,
			&object);
	if (res != TEE_SUCCESS) {
		EMSG("Failed to open persistent object, res=0x%08x",
				res);
		goto exit_remove_object;
	}

	switch (nCommandID) {
	case TA_STORAGE_BENCHMARK_CMD_TEST_READ:
		res = test_read(object, data_size, chunk_buf,
				chunk_size, spent_time_in_ms);
		break;

	case TA_STORAGE_BENCHMARK_CMD_TEST_WRITE:
		res = test_write(object, data_size, chunk_buf,
				chunk_size, spent_time_in_ms);
		break;

	case TA_STORAGE_BENCHMARK_CMD_TEST_REWRITE:
		res = test_rewrite(object, data_size, chunk_buf,
				chunk_size, spent_time_in_ms);
		break;

	default:
		res = TEE_ERROR_BAD_PARAMETERS;
	}

	if (res != TEE_SUCCESS)
		goto exit_remove_object;

	if (do_verify)
		res = verify_file_data(object, data_size,
				chunk_buf, chunk_size);


exit_remove_object:
	TEE_CloseAndDeletePersistentObject1(object);
exit_free_chunk_buf:
	TEE_Free(chunk_buf);
exit:

	return res;
}
Example 18
void
avl_balance(avl_tree_t **root){

	HMSG("balance() ... \n");

	switch( (*root)->flags ){
    
	case AVL_FLAGS_LEFT_UNBAL:
    {
		avl_tree_t *left_root = (*root)->left;
		switch(left_root->flags){

		case AVL_FLAGS_BALANCED:
			IMSG("Balanced sub-tree\n");
			break;
		case AVL_FLAGS_LEFT_HEAVY:
			/* Nothing to do ... */
			HMSG("LEFT LEFT\n");
			break;
		case AVL_FLAGS_RIGHT_HEAVY:
			HMSG("LEFT RIGHT\n");
			avl_left_rotate( &((*root)->left));
			break;

		case AVL_FLAGS_LEFT_UNBAL:
		case AVL_FLAGS_RIGHT_UNBAL:
			EMSG("Unbalanced sub-tree !!!\n");
			break;
		default:
			EMSG("Inconsistent Flags\n");
		}

		avl_right_rotate(root);

    }
    break;
	case AVL_FLAGS_RIGHT_UNBAL:
    {
		avl_tree_t *right_root = (*root)->right;
		switch(right_root->flags){

		case AVL_FLAGS_BALANCED:
			IMSG("Balanced sub-tree\n");
			break;
		case AVL_FLAGS_LEFT_HEAVY:
			HMSG("RIGHT LEFT\n");
			avl_right_rotate( &((*root)->right));
			break;
		case AVL_FLAGS_RIGHT_HEAVY:
			/* Nothing to do ... */
			HMSG("RIGHT RIGHT\n");
			break;

		case AVL_FLAGS_LEFT_UNBAL:
		case AVL_FLAGS_RIGHT_UNBAL:
			EMSG("Unbalanced sub-tree !!!\n");
			break;
		default:
			EMSG("Inconsistent Flags\n");
		}

		avl_left_rotate(root);

    }
    break;
	case AVL_FLAGS_BALANCED:
	case AVL_FLAGS_LEFT_HEAVY:
	case AVL_FLAGS_RIGHT_HEAVY:
		EMSG("useless call of avl_balance()\n");
		break;
	default:
		EMSG("Inconsistent Flags\n");
	}

}
Example 19
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	thread_init_boot_thread();

	init_asan();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part)),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);

	/* Check that hashes of what's in pageable area is OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, page, res);
			panic();
		}
	}

	/*
	 * Assert prepaged init sections are page aligned so that nothing
	 * trails uninited at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the pager alias area at the end of the small-page block that
	 * the rest of the binary is loaded into. We're taking more than needed, but
	 * we're guaranteed to not need more than the physical amount of
	 * TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				TEE_MATTR_PRX, paged_store, hashes);

	tee_pager_add_pages((vaddr_t)__pageable_start,
			init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start + init_size,
			(pageable_size - init_size) / SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			true);
}
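
The hash table allocated near the top of init_runtime() holds one SHA-256 digest per small page of the pageable area, which is what the "Hashes: %zu bytes" message reports. A tiny worked example of that sizing, assuming 4 KiB pages, 32-byte digests, and a hypothetical 512 KiB pageable area:

#include <stdio.h>

int main(void)
{
    /* Assumed constants: 4 KiB small pages, 32-byte SHA-256 digests */
    const unsigned long small_page_size = 4096;
    const unsigned long sha256_hash_size = 32;
    unsigned long pageable_size = 512 * 1024;      /* hypothetical pageable area */
    unsigned long hash_size = (pageable_size / small_page_size) * sha256_hash_size;

    printf("pages=%lu, hash table=%lu bytes\n",
           pageable_size / small_page_size, hash_size);   /* pages=128, 4096 bytes */
    return 0;
}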