Example #1
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage, then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}
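A hypothetical call site for this recovery path (a sketch, not part of the example above): the helper name, CBMEM_ID_RESUME, and the 64 KiB size are illustrative assumptions; the return value follows the 0 = recovered, non-zero = failed convention shown.

/* Hypothetical romstage helper (sketch); the id and size are illustrative. */
static void romstage_recover_cbmem(void)
{
	if (cbmem_initialize_id_size(CBMEM_ID_RESUME, 64 * 1024))
		printk(BIOS_ERR, "CBMEM recovery failed; aborting S3 resume.\n");
}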
Example #2
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}
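A matching first-boot call site might look like the following minimal sketch, assuming a region should be pre-reserved before the init hooks get a chance to allocate; the id and the 1 MiB size are illustrative assumptions.

/* Hypothetical first-boot helper (sketch): create an empty CBMEM area and
 * pre-reserve an assumed 1 MiB region ahead of the init hooks. */
static void romstage_create_cbmem(void)
{
	cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY, 1024 * 1024);
}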
void cbmem_initialize_empty(void)
{
	uintptr_t pointer_addr;
	uintptr_t root_addr;
	unsigned long max_entries;
	struct cbmem_root *root;
	struct cbmem_root_pointer *pointer;

	/* Place the root pointer and the root. The number of entries is
	 * dictated by the difference between the root address and the
	 * pointer, where the root address is aligned down to
	 * DYN_CBMEM_ALIGN_SIZE. The pointer falls just below the
	 * address returned by get_top_aligned(). */
	pointer_addr = get_top_aligned();
	if (pointer_addr == 0)
		return;

	root_addr = pointer_addr - ROOT_MIN_SIZE;
	root_addr &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
	pointer_addr -= sizeof(struct cbmem_root_pointer);

	max_entries = (pointer_addr - (root_addr + sizeof(*root))) /
	              sizeof(struct cbmem_entry);

	pointer = (void *)pointer_addr;
	pointer->magic = CBMEM_POINTER_MAGIC;
	pointer->root = root_addr;

	root = (void *)root_addr;
	root->max_entries = max_entries;
	root->num_entries = 0;
	root->locked = 0;
	root->size = pointer_addr - root_addr +
	             sizeof(struct cbmem_root_pointer);

	/* Add an entry covering the root region. */
	cbmem_entry_append(root, CBMEM_ID_ROOT, root_addr, root->size);

	printk(BIOS_DEBUG, "CBMEM: root @ %p %d entries.\n",
	       root, root->max_entries);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
}
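To make the placement arithmetic above concrete, here is a standalone sketch that reproduces it with assumed constants and structure sizes; ROOT_MIN_SIZE, DYN_CBMEM_ALIGN_SIZE, the 0x7fff0000 top address, and the 8/16/24-byte structure sizes are all illustrative assumptions, not the build's actual values.

#include <stdint.h>
#include <stdio.h>

#define ROOT_MIN_SIZE		0x800	/* assumed */
#define DYN_CBMEM_ALIGN_SIZE	4096	/* assumed */

int main(void)
{
	/* Assumed value of get_top_aligned(). */
	uintptr_t pointer_addr = 0x7fff0000;
	uintptr_t root_addr;
	unsigned long max_entries;

	/* Root is aligned down; the pointer sits just below the top. */
	root_addr = pointer_addr - ROOT_MIN_SIZE;
	root_addr &= ~((uintptr_t)DYN_CBMEM_ALIGN_SIZE - 1);
	pointer_addr -= 8;	/* assumed sizeof(struct cbmem_root_pointer) */

	/* Entries fill the gap between the root header and the pointer. */
	max_entries = (pointer_addr - (root_addr + 16)) / 24;

	printf("root @ 0x%jx, pointer @ 0x%jx, %lu entries\n",
	       (uintmax_t)root_addr, (uintmax_t)pointer_addr, max_entries);
	return 0;
}

With these assumptions it prints root @ 0x7ffef000, pointer @ 0x7ffefff8, 169 entries, and root->size works out to exactly 0x1000 bytes.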
int cbmem_initialize(void)
{
	struct cbmem_root *root;
	uintptr_t top_according_to_root;

	root = get_root();

	/* No recovery possible since root couldn't be recovered. */
	if (root == NULL)
		return cbmem_fail_recovery();

	/* Sanity check the root. */
	top_according_to_root = (root->size + (uintptr_t)root);
	if (get_top_aligned() != top_according_to_root)
		return cbmem_fail_recovery();

	if (root->num_entries > root->max_entries)
		return cbmem_fail_recovery();

	if ((root->max_entries * sizeof(struct cbmem_entry)) >
	    (root->size - sizeof(struct cbmem_root_pointer) - sizeof(*root)))
		return cbmem_fail_recovery();

	/* Validate current entries. */
	if (validate_entries(root))
		return cbmem_fail_recovery();

#if defined(__PRE_RAM__)
	/* Lock the root in romstage on a recovery. The assumption is that
	 * recovery is called during romstage on the S3 resume path. */
	root->locked = 1;
#endif

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();

	/* Recovery successful. */
	return 0;
}
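The two entry points above are normally selected by the platform's wakeup state. A minimal dispatch sketch, assuming an is_wakeup flag supplied by the caller; coreboot's wrapper of this shape is called cbmem_recovery(), but treat the exact name and signature here as an assumption.

/* Sketch: build a fresh area on a normal boot, attempt recovery only on an
 * S3 wakeup, and propagate the recovery result to the caller. */
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;

	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();

	return rv;
}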