Example no. 1
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}
Example no. 2
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then S3 resume path
	 * is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}
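The two entry points above complement each other: cbmem_initialize_id_size() tries to recover an existing CBMEM database and returns non-zero on failure, while cbmem_initialize_empty_id_size() creates a fresh, empty database. Below is a minimal caller-side sketch; the wrapper name cbmem_bring_up() is hypothetical, and it assumes an id/size of 0 simply skips the optional cbmem_add() call, as the size checks in both examples suggest.

#include <cbmem.h>

/*
 * Hypothetical wrapper (not part of the examples above): choose between
 * recovery and a fresh, empty CBMEM area. Passing 0 for id and size is
 * assumed to skip the optional cbmem_add() call.
 */
static void cbmem_bring_up(int s3_resume)
{
	if (s3_resume) {
		/* Try to recover the existing in-memory database first. */
		if (cbmem_initialize_id_size(0, 0) == 0)
			return;	/* Recovery succeeded. */
	}

	/* Normal boot, or recovery failed: start from scratch. */
	cbmem_initialize_empty_id_size(0, 0);
}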
Example no. 3
int imd_recover(struct imd *imd)
{
	const struct imd_entry *e;
	uintptr_t small_upper_limit;
	struct imdr *imdr;

	imdr = &imd->lg;
	if (imdr_recover(imdr) != 0)
		return -1;

	/* Determine if the small region is present. */
	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return 0;

	small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
	small_upper_limit += imdr_entry_size(imdr, e);

	imd->sm.limit = small_upper_limit;

	/* Tear down any changes on failure. */
	if (imdr_recover(&imd->sm) != 0) {
		imd_handle_init(imd, (void *)imd->lg.limit);
		return -1;
	}

	return 0;
}
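The small tier's upper limit falls straight out of the large-region entry that hosts it: the entry's base address plus its size. A standalone sketch of that pointer arithmetic, using a plain local buffer instead of a real imd entry:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Standalone sketch of the small_upper_limit arithmetic in imd_recover():
 * the limit is the hosting entry's base address plus its size. The buffer
 * below is a placeholder, not a real imd entry.
 */
int main(void)
{
	unsigned char region[4096];	/* Stand-in for the large-region entry. */
	uintptr_t entry_base = (uintptr_t)&region[0];
	size_t entry_size = sizeof(region);

	uintptr_t small_upper_limit = entry_base + entry_size;

	printf("small region limit: %#lx\n", (unsigned long)small_upper_limit);
	return 0;
}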
Example no. 4
void imd_handle_init_partial_recovery(struct imd *imd)
{
	const struct imd_entry *e;
	struct imd_root_pointer *rp;
	struct imdr *imdr;

	if (imd->lg.limit == 0)
		return;

	imd_handle_init(imd, (void *)imd->lg.limit);

	/* Initialize root pointer for the large regions. */
	imdr = &imd->lg;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);

	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += imdr_entry_size(imdr, e);
	imdr = &imd->sm;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);
}
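Both tiers resolve their root the same way: the root pointer structure stores an offset, and relative_pointer() converts it into an absolute address. Below is a minimal standalone sketch of that idea; the struct layout and the relative_pointer() body are illustrative assumptions, not coreboot's definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative root-pointer layout; coreboot's imd_root_pointer differs. */
struct root_pointer {
	int32_t root_offset;	/* Signed offset from this structure to the root. */
};

/* Assumed behavior of relative_pointer(): base plus a signed offset. */
static void *relative_pointer(void *base, int32_t offset)
{
	return (void *)((intptr_t)base + offset);
}

int main(void)
{
	unsigned char *region = malloc(256);
	if (region == NULL)
		return 1;

	/* Place the root pointer just below the region's limit, as the imd does. */
	struct root_pointer *rp = (struct root_pointer *)(region + 256 - sizeof(*rp));
	rp->root_offset = -64;	/* Pretend the root lives 64 bytes below it. */

	void *root = relative_pointer(rp, rp->root_offset);
	printf("root sits %td bytes below the root pointer\n",
	       (char *)rp - (char *)root);

	free(region);
	return 0;
}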
Example no. 5
static void stage_cache_recover(void)
{
	struct imd *imd;
	void *base;
	size_t size;

	imd = imd_get();
	stage_cache_external_region(&base, &size);
	imd_handle_init(imd, (void *)(size + (uintptr_t)base));
	if (imd_recover(imd))
		printk(BIOS_DEBUG, "Unable to recover external stage cache.\n");
}
Example no. 6
static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}
Example no. 7
static void stage_cache_create_empty(void)
{
	struct imd *imd;
	void *base;
	size_t size;

	imd = imd_get();
	stage_cache_external_region(&base, &size);
	imd_handle_init(imd, (void *)(size + (uintptr_t)base));

	printk(BIOS_DEBUG, "External stage cache:\n");
	imd_create_tiered_empty(imd, 4096, 4096, 1024, 32);
	if (imd_limit_size(imd, size))
		printk(BIOS_DEBUG, "Could not limit stage cache size.\n");
}
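Both stage-cache helpers above anchor the imd handle at the upper boundary of the external region, base + size, since the imd grows downward from its limit. A standalone sketch of that boundary computation, with a malloc'd buffer standing in for stage_cache_external_region():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Standalone sketch of the boundary math in stage_cache_recover() and
 * stage_cache_create_empty(): the handle is anchored at the top of the
 * external region, i.e. base + size. The malloc'd buffer is a placeholder.
 */
int main(void)
{
	size_t size = 64 * 1024;	/* Placeholder region size. */
	void *base = malloc(size);	/* Placeholder region base. */
	if (base == NULL)
		return 1;

	void *region_top = (void *)(size + (uintptr_t)base);

	printf("region: [%p, %p)\n", base, region_top);
	free(base);
	return 0;
}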
Example no. 8
int imd_create_tiered_empty(struct imd *imd,
				size_t lg_root_size, size_t lg_entry_align,
				size_t sm_root_size, size_t sm_entry_align)
{
	size_t sm_region_size;
	const struct imd_entry *e;
	struct imdr *imdr;

	imdr = &imd->lg;

	if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
		return -1;

	/* Calculate the size of the small region to request. */
	sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
	sm_region_size += sm_root_size;
	sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

	/* Add a new entry to the large region to cover the root and entries. */
	e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);

	if (e == NULL)
		goto fail;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += sm_region_size;

	if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
		imdr_limit_size(&imd->sm, sm_region_size))
		goto fail;

	return 0;
fail:
	imd_handle_init(imd, (void *)imdr->limit);
	return -1;
}
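The space reserved for the small tier is a worst-case estimate: one minimally aligned slot for every entry the small root can describe, plus the small root itself, rounded up to the large tier's alignment. A standalone sketch of that arithmetic follows; root_num_entries() is modeled here as a simple division by an assumed per-entry bookkeeping cost rather than coreboot's actual helper, and the sizes are illustrative, not the CBMEM_* constants.

#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

/*
 * Illustrative stand-in for root_num_entries(): assume each entry costs
 * ENTRY_BOOKKEEPING bytes of root space. coreboot's real helper also
 * subtracts the root header overhead before dividing.
 */
#define ENTRY_BOOKKEEPING	16

static size_t root_num_entries(size_t root_size)
{
	return root_size / ENTRY_BOOKKEEPING;
}

int main(void)
{
	size_t sm_root_size = 1024;	/* Illustrative, not CBMEM_SM_ROOT_SIZE. */
	size_t sm_entry_align = 32;	/* Illustrative, not CBMEM_SM_ALIGN. */
	size_t lg_entry_align = 4096;	/* Illustrative, not CBMEM_LG_ALIGN. */

	/* Mirror the calculation in imd_create_tiered_empty(). */
	size_t sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
	sm_region_size += sm_root_size;
	sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

	printf("small region request: %zu bytes\n", sm_region_size);
	return 0;
}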