Example 1
static inline void *cbmem_top_cached(void)
{
#if !defined(__PRE_RAM__)
	if (cached_cbmem_top == NULL)
		cached_cbmem_top = cbmem_top();

	return cached_cbmem_top;
#else
	return cbmem_top();
#endif
}
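Example 1 relies on a cached_cbmem_top variable that the snippet does not show. A minimal sketch of the assumed file-scope declaration, whose NULL default is what the first call checks for:

#if !defined(__PRE_RAM__)
/* Assumed backing store for the cached value; a writable global is only
 * available once execution has moved out of the pre-RAM stages. */
static void *cached_cbmem_top;
#endif

In pre-RAM stages the helper simply falls back to calling cbmem_top() on every invocation, since a writable global is not available there.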
Example 2
void fsp_verify_memory_init_hobs(void)
{
	struct range_entry fsp_mem;
	struct range_entry tolum;

	/* Lookup the FSP_BOOTLOADER_TOLUM_HOB */
	if (fsp_find_bootloader_tolum(&tolum))
		die("9.3: FSP_BOOTLOADER_TOLUM_HOB missing!\n");
	if (range_entry_size(&tolum) < cbmem_overhead_size()) {
		printk(BIOS_CRIT,
			"FSP_BOOTLOADER_TOLUM_SIZE: 0x%08llx < 0x%08zx\n",
			range_entry_size(&tolum), cbmem_overhead_size());
		die("FSP_BOOTLOADER_TOLUM_HOB too small!\n");
	}

	/* Locate the FSP reserved memory area */
	if (fsp_find_reserved_memory(&fsp_mem))
		die("9.1: FSP_RESERVED_MEMORY_RESOURCE_HOB missing!\n");

	/* Verify that the bootloader TOLUM is above the FSP reserved area */
	if (range_entry_end(&tolum) <= range_entry_base(&fsp_mem)) {
		printk(BIOS_CRIT,
			"TOLUM end: 0x%08llx != 0x%08llx: FSP rsvd base\n",
			range_entry_end(&tolum), range_entry_base(&fsp_mem));
		die("FSP reserved region after BIOS TOLUM!\n");
	}
	if (range_entry_base(&tolum) < range_entry_end(&fsp_mem)) {
		printk(BIOS_CRIT,
			"TOLUM base: 0x%08llx < 0x%08llx: FSP rsvd end\n",
			range_entry_base(&tolum), range_entry_end(&fsp_mem));
		die("FSP reserved region overlaps BIOS TOLUM!\n");
	}

	/* Verify that the BIOS TOLUM immediately follows the FSP reserved
	 * area
	 */
	if (range_entry_base(&tolum) != range_entry_end(&fsp_mem)) {
		printk(BIOS_CRIT,
			"TOLUM base: 0x%08llx != 0x%08llx: FSP rsvd end\n",
			range_entry_base(&tolum), range_entry_end(&fsp_mem));
		die("Space between FSP reserved region and BIOS TOLUM!\n");
	}

	if (range_entry_end(&tolum) != (uintptr_t)cbmem_top()) {
		printk(BIOS_CRIT, "TOLUM end: 0x%08llx != 0x%p: cbmem_top\n",
			range_entry_end(&tolum), cbmem_top());
		die("Space between cbmem_top and BIOS TOLUM!\n");
	}
}
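Taken together, the checks in Example 2 enforce the layout sketched below at the top of low DRAM; the sketch is derived only from the comparisons above, not from additional FSP documentation:

/*
 *  +------------------------------+ <- range_entry_end(&tolum) == cbmem_top()
 *  | FSP_BOOTLOADER_TOLUM_HOB     |    (at least cbmem_overhead_size() bytes)
 *  +------------------------------+ <- range_entry_base(&tolum)
 *  | FSP reserved memory          |      == range_entry_end(&fsp_mem)
 *  +------------------------------+ <- range_entry_base(&fsp_mem)
 *  | remaining low DRAM           |
 *  +------------------------------+
 */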
Example 3
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}
Example 4
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}
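A minimal sketch of how romstage code typically chooses between the entry points of Examples 3 and 4; s3wake and fsp_reserved_bytes are assumed to be provided by the caller, mirroring the pattern visible in Examples 18 and 19:

	if (!s3wake) {
		/* Normal boot: start with an empty CBMEM and reserve the FSP region. */
		cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
			fsp_reserved_bytes);
	} else if (cbmem_initialize_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
			fsp_reserved_bytes)) {
		/* CBMEM could not be recovered on S3 resume; reset to boot cleanly. */
		hard_reset();
	}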
Example 5
void stage_cache_external_region(void **base, size_t *size)
{
	/*
	 * The ramstage cache lives in the TSEG region at RESERVED_SMM_OFFSET.
	 * The top of RAM is defined to be the TSEG base address.
	 */
	*size = RESERVED_SMM_SIZE;
	*base = (void *)((uintptr_t)cbmem_top() + RESERVED_SMM_OFFSET);
}
Example 6
struct ramstage_cache *ramstage_cache_location(long *size)
{
	/* The ramstage cache lives in the TSEG region.
	 * The top of ram is defined to be the TSEG base address. */
	u32 offset = smm_region_size();
	offset -= CONFIG_IED_REGION_SIZE;
	offset -= CONFIG_SMM_RESERVED_SIZE;

	*size = CONFIG_SMM_RESERVED_SIZE;
	return (void *)(cbmem_top() + offset);
}
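The offset arithmetic in Example 6 implies the TSEG layout summarized below, with cbmem_top() returning the TSEG base; this is inferred from the snippet alone:

/*
 * TSEG base = cbmem_top()
 * TSEG top  = cbmem_top() + smm_region_size()
 *
 * ramstage cache: [TSEG top - IED - SMM_RESERVED, TSEG top - IED)
 * IED region:     [TSEG top - IED, TSEG top)
 *
 * where IED = CONFIG_IED_REGION_SIZE and SMM_RESERVED = CONFIG_SMM_RESERVED_SIZE.
 */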
Example 7
void stage_cache_external_region(void **base, size_t *size)
{
	char *smm_base;
	/* 1MiB cache size */
	const long cache_size = CONFIG_SMM_RESERVED_SIZE;

	/* The ramstage cache lives in the TSEG region; cbmem_top() is
	 * defined to return the TSEG base. */
	smm_base = cbmem_top();
	*size = cache_size;
	*base = &smm_base[smm_region_size() - cache_size];
}
Example 8
struct ramstage_cache *ramstage_cache_location(long *size)
{
	char *smm_base;
	/* 1MiB cache size */
	const long cache_size = CONFIG_SMM_RESERVED_SIZE;

	/* The ramstage cache lives in the TSEG region; cbmem_top() is
	 * defined to return the TSEG base. */
	smm_base = cbmem_top();
	*size = cache_size;
	return (void *)&smm_base[smm_region_size() - cache_size];
}
Example 9
static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}
Example 10
static void nc_read_resources(struct device *dev)
{
	unsigned long base_k;
	int index = 0;
	unsigned long size_k;

	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* 0 -> 0xa0000 */
	base_k = 0;
	size_k = 0xa0000 - base_k;
	ram_resource(dev, index++, RES_IN_KIB(base_k), RES_IN_KIB(size_k));

	/*
	 * Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xdffff: RAM
	 * 0xe0000 - 0xfffff: ROM shadow
	 */
	base_k += size_k;
	size_k = 0xc0000 - base_k;
	mmio_resource(dev, index++, RES_IN_KIB(base_k), RES_IN_KIB(size_k));

	base_k += size_k;
	size_k = 0x100000 - base_k;
	reserved_ram_resource(dev, index++, RES_IN_KIB(base_k),
		RES_IN_KIB(size_k));

	/* 0x100000 -> cbmem_top - cacheable and usable */
	base_k += size_k;
	size_k = (unsigned long)cbmem_top() - base_k;
	ram_resource(dev, index++, RES_IN_KIB(base_k), RES_IN_KIB(size_k));

	/* cbmem_top -> 0xc0000000 - reserved */
	base_k += size_k;
	size_k = 0xc0000000 - base_k;
	reserved_ram_resource(dev, index++, RES_IN_KIB(base_k),
		RES_IN_KIB(size_k));

	/* 0xc0000000 -> 4GiB is mmio. */
	base_k += size_k;
	size_k = 0x100000000ull - base_k;
	mmio_resource(dev, index++, RES_IN_KIB(base_k), RES_IN_KIB(size_k));
}
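Despite the _k suffix, base_k and size_k in Example 10 hold byte addresses and byte sizes; the code depends on a RES_IN_KIB() helper converting them to the KiB units the resource API expects. A sketch of the assumed macro:

/* Assumed helper: converts a byte count (or byte address) to KiB. */
#define RES_IN_KIB(r)	((r) >> 10)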
Example 11
asmlinkage void *car_stage_c_entry(void)
{
	struct postcar_frame pcf;
	bool s3wake;
	uintptr_t top_of_ram;
	uintptr_t top_of_low_usable_memory;

	post_code(0x20);
	console_init();

	/* Initialize DRAM */
	s3wake = fill_power_state() == ACPI_S3;
	fsp_memory_init(s3wake);

	/* Disable the ROM shadow 0x000e0000 - 0x000fffff */
	disable_rom_shadow();

	/* Initialize the PCIe bridges */
	pcie_init();

	if (postcar_frame_init(&pcf, 1*KiB))
		die("Unable to initialize postcar frame.\n");

	/* Locate the top of RAM */
	top_of_low_usable_memory = (uintptr_t) cbmem_top();
	top_of_ram = ALIGN(top_of_low_usable_memory, 16 * MiB);

	/* Cache postcar and ramstage */
	postcar_frame_add_mtrr(&pcf, top_of_ram - (16 * MiB), 16 * MiB,
		MTRR_TYPE_WRBACK);

	/* Cache RMU area */
	postcar_frame_add_mtrr(&pcf, (uintptr_t) top_of_low_usable_memory,
		0x10000, MTRR_TYPE_WRTHROUGH);

	/* Cache ESRAM */
	postcar_frame_add_mtrr(&pcf, 0x80000000, 0x80000, MTRR_TYPE_WRBACK);

	/* Cache SPI flash - Write protect not supported */
	postcar_frame_add_mtrr(&pcf, (uint32_t)(-CONFIG_ROM_SIZE),
		CONFIG_ROM_SIZE, MTRR_TYPE_WRTHROUGH);

	run_postcar_phase(&pcf);
	return NULL;
}
Example 12
static void mc_add_dram_resources(device_t dev)
{
	u32 bmbound, bsmmrrl;
	int index = 0;
	uint64_t highmem_size = 0;
	uint32_t fsp_mem_base = 0;

	GetHighMemorySize(&highmem_size);
	fsp_mem_base = (uint32_t)cbmem_top();

	bmbound = iosf_bunit_read(BUNIT_BMBOUND);
	bsmmrrl = iosf_bunit_read(BUNIT_SMRRL) << 20;

	if (bsmmrrl) {
		printk(BIOS_DEBUG, "UMA, GTT & SMM memory location: 0x%x\n"
				"UMA, GTT & SMM memory size: %dM\n",
				bsmmrrl, (bmbound - bsmmrrl) >> 20);

		printk(BIOS_DEBUG, "FSP memory location: 0x%x\nFSP memory size: %dM\n",
				fsp_mem_base, (bsmmrrl - fsp_mem_base) >> 20);
	}

	printk(BIOS_INFO, "Available memory below 4GB: 0x%08x (%dM)\n",
			fsp_mem_base, fsp_mem_base >> 20);

	/* Report the memory regions. */
	ram_resource(dev, index++, 0, legacy_hole_base_k);
	ram_resource(dev, index++, legacy_hole_base_k + legacy_hole_size_k,
	     ((fsp_mem_base >> 10) - (legacy_hole_base_k + legacy_hole_size_k)));

	/* Mark SMM & FSP regions reserved */
	mmio_resource(dev, index++, fsp_mem_base >> 10,
			(bmbound - fsp_mem_base) >> 10);

	if (highmem_size) {
		ram_resource(dev, index++, 0x100000000 >> 10, highmem_size >> 10);
	}
	printk(BIOS_INFO, "Available memory above 4GB: %lluM\n",
			highmem_size >> 20);

	index = add_fixed_resources(dev, index);
}
Example 13
asmlinkage void car_stage_entry(void)
{
	bool s3wake;
	struct postcar_frame pcf;
	uintptr_t top_of_ram;
	struct chipset_power_state *ps = pmc_get_power_state();

	console_init();

	/* Program MCHBAR, DMIBAR, GDXBAR and EDRAMBAR */
	systemagent_early_init();
	/* initialize Heci interface */
	heci_init(HECI1_BASE_ADDRESS);

	timestamp_add_now(TS_START_ROMSTAGE);
	s3wake = pmc_fill_power_state(ps) == ACPI_S3;
	fsp_memory_init(s3wake);
	pmc_set_disb();
	if (!s3wake)
		save_dimm_info();
	if (postcar_frame_init(&pcf, 1 * KiB))
		die("Unable to initialize postcar frame.\n");

	/*
	 * We need to make sure ramstage will be run cached. At this
	 * point exact location of ramstage in cbmem is not known.
	 * Instruct postcar to cache 16 megs under cbmem top which is
	 * a safe bet to cover ramstage.
	 */
	top_of_ram = (uintptr_t) cbmem_top();
	printk(BIOS_DEBUG, "top_of_ram = 0x%lx\n", top_of_ram);
	top_of_ram -= 16*MiB;
	postcar_frame_add_mtrr(&pcf, top_of_ram, 16*MiB, MTRR_TYPE_WRBACK);

	/* Cache the ROM as WP just below 4GiB. */
	postcar_frame_add_romcache(&pcf, MTRR_TYPE_WRPROT);

	run_postcar_phase(&pcf);
}
Example 14
/* platform_enter_postcar() sets up the postcar frame: the stack to use after
 * cache-as-ram is torn down as well as the MTRR settings to apply. */
static void platform_enter_postcar(void)
{
	struct postcar_frame pcf;
	uintptr_t top_of_ram;

	if (postcar_frame_init(&pcf, ROMSTAGE_RAM_STACK_SIZE))
		die("Unable to initialize postcar frame.\n");
	/* Cache the ROM as WP just below 4GiB. */
	postcar_frame_add_mtrr(&pcf, CACHE_ROM_BASE, CACHE_ROM_SIZE,
			       MTRR_TYPE_WRPROT);

	/* Cache RAM as WB from 0 -> CACHE_TMP_RAMTOP. */
	postcar_frame_add_mtrr(&pcf, 0, CACHE_TMP_RAMTOP, MTRR_TYPE_WRBACK);

	/* Cache at least 8 MiB below the top of ram, and at most 8 MiB
	 * above top of the ram. This satisfies MTRR alignment requirement
	 * with different TSEG size configurations.
	 */
	top_of_ram = ALIGN_DOWN((uintptr_t)cbmem_top(), 8*MiB);
	postcar_frame_add_mtrr(&pcf, top_of_ram - 8*MiB, 16*MiB,
			       MTRR_TYPE_WRBACK);

	run_postcar_phase(&pcf);
}
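As a worked example of the alignment in Example 14 (the address is purely hypothetical): if cbmem_top() returned 0x7fb00000, ALIGN_DOWN(..., 8*MiB) would yield top_of_ram = 0x7f800000, and the MTRR would then cover 0x7f000000 through 0x7fffffff, i.e. 8 MiB below and up to 8 MiB above the top of RAM, keeping the 16 MiB window naturally aligned for any TSEG size configuration.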
Example 15
asmlinkage void car_stage_entry(void)
{
	struct postcar_frame pcf;
	uintptr_t top_of_ram;
	bool s3wake;
	struct chipset_power_state *ps = car_get_var_ptr(&power_state);
	void *smm_base;
	size_t smm_size, var_size;
	const void *new_var_data;
	uintptr_t tseg_base;

	timestamp_add_now(TS_START_ROMSTAGE);

	soc_early_romstage_init();
	disable_watchdog();

	console_init();

	s3wake = fill_power_state(ps) == ACPI_S3;
	fsp_memory_init(s3wake);

	if (punit_init())
		set_max_freq();
	else
		printk(BIOS_DEBUG, "Punit failed to initialize properly\n");

	/* Stash variable MRC data and let cache system update it later */
	new_var_data = fsp_find_extension_hob_by_guid(hob_variable_guid,
							&var_size);
	if (new_var_data)
		mrc_cache_stash_vardata(new_var_data, var_size,
					car_get_var(fsp_version));
	else
		printk(BIOS_ERR, "Failed to determine variable data\n");

	if (postcar_frame_init(&pcf, 1*KiB))
		die("Unable to initialize postcar frame.\n");

	mainboard_save_dimm_info();

	/*
	 * We need to make sure ramstage will be run cached. At this point exact
	 * location of ramstage in cbmem is not known. Instruct postcar to cache
	 * 16 megs under cbmem top which is a safe bet to cover ramstage.
	 */
	top_of_ram = (uintptr_t) cbmem_top();
	/* cbmem_top() needs to be at least 16 MiB aligned */
	assert(ALIGN_DOWN(top_of_ram, 16*MiB) == top_of_ram);
	postcar_frame_add_mtrr(&pcf, top_of_ram - 16*MiB, 16*MiB, MTRR_TYPE_WRBACK);

	/* Cache the memory-mapped boot media. */
	if (IS_ENABLED(CONFIG_BOOT_DEVICE_MEMORY_MAPPED))
		postcar_frame_add_mtrr(&pcf, -CONFIG_ROM_SIZE, CONFIG_ROM_SIZE,
					MTRR_TYPE_WRPROT);

	/*
	 * Cache the TSEG region at the top of ram. This region is
	 * not restricted to SMM mode until SMM has been relocated.
	 * By setting the region to cacheable it provides faster access
	 * when relocating the SMM handler as well as using the TSEG
	 * region for other purposes.
	 */
	smm_region(&smm_base, &smm_size);
	tseg_base = (uintptr_t)smm_base;
	postcar_frame_add_mtrr(&pcf, tseg_base, smm_size, MTRR_TYPE_WRBACK);

	run_postcar_phase(&pcf);
}
Example 16
/* setup_stack_and_mttrs() determines the stack to use after
 * cache-as-ram is torn down as well as the MTRR settings to use. */
static void *setup_stack_and_mttrs(void)
{
	unsigned long top_of_stack;
	int num_mtrrs;
	uint32_t *slot;
	uint32_t mtrr_mask_upper;
	uint32_t top_of_ram;

	/* Top of stack needs to be aligned to a 4-byte boundary. */
	top_of_stack = choose_top_of_stack() & ~3;
	slot = (void *)top_of_stack;
	num_mtrrs = 0;

	/* The upper bits of the MTRR mask need to be set according to the
	 * number of physical address bits. */
	mtrr_mask_upper = (1 << ((cpuid_eax(0x80000008) & 0xff) - 32)) - 1;

	/* The order for each MTRR is value then base with upper 32-bits of
	 * each value coming before the lower 32-bits. The reasoning for
	 * this ordering is to create a stack layout like the following:
	 *   +0: Number of MTRRs
	 *   +4: MTRR base 0 31:0
	 *   +8: MTRR base 0 63:32
	 *  +12: MTRR mask 0 31:0
	 *  +16: MTRR mask 0 63:32
	 *  +20: MTRR base 1 31:0
	 *  +24: MTRR base 1 63:32
	 *  +28: MTRR mask 1 31:0
	 *  +32: MTRR mask 1 63:32
	 */

	/* Cache the ROM as WP just below 4GiB. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
	num_mtrrs++;

	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	top_of_ram = (uint32_t)cbmem_top();
	/* Cache 8MiB below the top of ram. The top of ram under 4GiB is the
	 * start of the TSEG region. It is required to be 8MiB aligned. Set
	 * this area as cacheable so it can be used later for ramstage before
	 * setting up the entire RAM as cacheable. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Cache 8MiB at the top of ram. Top of ram is where the TSEG
	 * region resides. However, it is not restricted to SMM mode until
	 * SMM has been relocated. By setting the region to cacheable it
	 * provides faster access when relocating the SMM handler as well
	 * as using the TSEG region for other purposes. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Save the number of MTRRs to setup. Return the stack location
	 * pointing to the number of MTRRs. */
	slot = stack_push(slot, num_mtrrs);

	return slot;
}
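To make the slot encoding in Example 16 concrete, assume (purely for illustration) a CONFIG_ROM_SIZE of 8 MiB (0x800000) and a 36-bit physical address space. The ROM entry pushed above then works out to:

/*
 * Hypothetical values for an 8 MiB ROM mapped just below 4 GiB:
 *   upper mask = 0x0000000f   (bits 35:32 of the mask)
 *   lower mask = 0xff800800   (~(0x800000 - 1) | MTRRphysMaskValid)
 *   upper base = 0x00000000
 *   lower base = 0xff800005   (0xff800000 | MTRR_TYPE_WRPROT)
 *
 * i.e. an 8 MiB write-protect window covering 0xff800000 - 0xffffffff.
 */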
Example 17
/* setup_romstage_stack_after_car() determines the stack to use after
 * cache-as-ram is torn down as well as the MTRR settings to use. */
static void *setup_romstage_stack_after_car(void)
{
	int num_mtrrs;
	u32 *slot;
	u32 mtrr_mask_upper;
	u32 top_of_ram;

	/* Top of stack needs to be aligned to a 4-byte boundary. */
	slot = (void *)romstage_ram_stack_top();
	num_mtrrs = 0;

	/* The upper bits of the MTRR mask need to be set according to the
	 * number of physical address bits. */
	mtrr_mask_upper = (1 << (cpu_phys_address_size() - 32)) - 1;

	/* The order for each MTRR is value then base with upper 32-bits of
	 * each value coming before the lower 32-bits. The reasoning for
	 * this ordering is to create a stack layout like the following:
	 *   +0: Number of MTRRs
	 *   +4: MTRR base 0 31:0
	 *   +8: MTRR base 0 63:32
	 *  +12: MTRR mask 0 31:0
	 *  +16: MTRR mask 0 63:32
	 *  +20: MTRR base 1 31:0
	 *  +24: MTRR base 1 63:32
	 *  +28: MTRR mask 1 31:0
	 *  +32: MTRR mask 1 63:32
	 */

	/* Cache the ROM as WP just below 4GiB. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
	num_mtrrs++;

	/* Cache RAM as WB from 0 -> CACHE_TMP_RAMTOP. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CACHE_TMP_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	top_of_ram = (uint32_t)cbmem_top();
	/* Cache 8MiB below the top of RAM. On haswell systems the top of
	 * RAM under 4GiB is the start of the TSEG region. It is required to
	 * be 8MiB aligned. Set this area as cacheable so it can be used later
	 * for ramstage before setting up the entire RAM as cacheable. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Cache 8MiB at the top of RAM. Top of RAM on haswell systems
	 * is where the TSEG region resides. However, it is not restricted
	 * to SMM mode until SMM has been relocated. By setting the region
	 * to cacheable it provides faster access when relocating the SMM
	 * handler as well as using the TSEG region for other purposes. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Save the number of MTRRs to setup. Return the stack location
	 * pointing to the number of MTRRs. */
	slot = stack_push(slot, num_mtrrs);

	return slot;
}
Example 18
void raminit(struct romstage_params *params)
{
	const EFI_GUID bootldr_tolum_guid = FSP_BOOTLOADER_TOLUM_HOB_GUID;
	EFI_HOB_RESOURCE_DESCRIPTOR *cbmem_root;
	FSP_INFO_HEADER *fsp_header;
	EFI_HOB_RESOURCE_DESCRIPTOR *fsp_memory;
	FSP_MEMORY_INIT fsp_memory_init;
	FSP_MEMORY_INIT_PARAMS fsp_memory_init_params;
	const EFI_GUID fsp_reserved_guid =
		FSP_RESERVED_MEMORY_RESOURCE_HOB_GUID;
	void *fsp_reserved_memory_area;
	FSP_INIT_RT_COMMON_BUFFER fsp_rt_common_buffer;
	void *hob_list_ptr;
	FSP_SMBIOS_MEMORY_INFO *memory_info_hob;
	const EFI_GUID memory_info_hob_guid = FSP_SMBIOS_MEMORY_INFO_GUID;
	MEMORY_INIT_UPD memory_init_params;
	const EFI_GUID mrc_guid = FSP_NON_VOLATILE_STORAGE_HOB_GUID;
	u32 *mrc_hob;
	u32 fsp_reserved_bytes;
	MEMORY_INIT_UPD *original_params;
	struct pei_data *pei_ptr;
	EFI_STATUS status;
	VPD_DATA_REGION *vpd_ptr;
	UPD_DATA_REGION *upd_ptr;
	int fsp_verification_failure = 0;
#if IS_ENABLED(CONFIG_DISPLAY_HOBS)
	unsigned long int data;
	EFI_PEI_HOB_POINTERS hob_ptr;
#endif

	/*
	 * Find and copy the UPD region to the stack so the platform can modify
	 * the settings if needed.  Modifications to the UPD buffer are done in
	 * the platform callback code.  The platform callback code is also
	 * responsible for assigning the UpdDataRngPtr to this buffer if any
	 * updates are made.  The default state is to leave the UpdDataRngPtr
	 * set to NULL.  This indicates that the FSP code will use the UPD
	 * region in the FSP binary.
	 */
	post_code(0x34);
	fsp_header = params->chipset_context;
	vpd_ptr = (VPD_DATA_REGION *)(fsp_header->CfgRegionOffset +
					fsp_header->ImageBase);
	printk(BIOS_DEBUG, "VPD Data: 0x%p\n", vpd_ptr);
	upd_ptr = (UPD_DATA_REGION *)(vpd_ptr->PcdUpdRegionOffset +
					fsp_header->ImageBase);
	printk(BIOS_DEBUG, "UPD Data: 0x%p\n", upd_ptr);
	original_params = (void *)((u8 *)upd_ptr +
		upd_ptr->MemoryInitUpdOffset);
	memcpy(&memory_init_params, original_params,
		sizeof(memory_init_params));

	/* Zero fill RT Buffer data and start populating fields. */
	memset(&fsp_rt_common_buffer, 0, sizeof(fsp_rt_common_buffer));
	pei_ptr = params->pei_data;
	if (pei_ptr->boot_mode == SLEEP_STATE_S3) {
		fsp_rt_common_buffer.BootMode = BOOT_ON_S3_RESUME;
	} else if (pei_ptr->saved_data != NULL) {
		fsp_rt_common_buffer.BootMode =
			BOOT_ASSUMING_NO_CONFIGURATION_CHANGES;
	} else {
		fsp_rt_common_buffer.BootMode = BOOT_WITH_FULL_CONFIGURATION;
	}
	fsp_rt_common_buffer.UpdDataRgnPtr = &memory_init_params;
	fsp_rt_common_buffer.BootLoaderTolumSize = cbmem_overhead_size();

	/* Get any board specific changes */
	fsp_memory_init_params.NvsBufferPtr = (void *)pei_ptr->saved_data;
	fsp_memory_init_params.RtBufferPtr = &fsp_rt_common_buffer;
	fsp_memory_init_params.HobListPtr = &hob_list_ptr;

	/* Update the UPD data */
	soc_memory_init_params(params, &memory_init_params);
	mainboard_memory_init_params(params, &memory_init_params);

	if (IS_ENABLED(CONFIG_MMA))
		setup_mma(&memory_init_params);

	post_code(0x36);

	/* Display the UPD data */
	if (IS_ENABLED(CONFIG_DISPLAY_UPD_DATA))
		soc_display_memory_init_params(original_params,
			&memory_init_params);

	/* Call FspMemoryInit to initialize RAM */
	fsp_memory_init = (FSP_MEMORY_INIT)(fsp_header->ImageBase
		+ fsp_header->FspMemoryInitEntryOffset);
	printk(BIOS_DEBUG, "Calling FspMemoryInit: 0x%p\n", fsp_memory_init);
	printk(BIOS_SPEW, "    0x%p: NvsBufferPtr\n",
		fsp_memory_init_params.NvsBufferPtr);
	printk(BIOS_SPEW, "    0x%p: RtBufferPtr\n",
		fsp_memory_init_params.RtBufferPtr);
	printk(BIOS_SPEW, "    0x%p: HobListPtr\n",
		fsp_memory_init_params.HobListPtr);

	timestamp_add_now(TS_FSP_MEMORY_INIT_START);
	post_code(POST_FSP_MEMORY_INIT);
	status = fsp_memory_init(&fsp_memory_init_params);
	post_code(0x37);
	timestamp_add_now(TS_FSP_MEMORY_INIT_END);

	printk(BIOS_DEBUG, "FspMemoryInit returned 0x%08x\n", status);
	if (status != EFI_SUCCESS)
		die("ERROR - FspMemoryInit failed to initialize memory!\n");

	/* Locate the FSP reserved memory area */
	fsp_reserved_bytes = 0;
	fsp_memory = get_next_resource_hob(&fsp_reserved_guid, hob_list_ptr);
	if (fsp_memory == NULL) {
		fsp_verification_failure = 1;
		printk(BIOS_DEBUG,
			"7.2: FSP_RESERVED_MEMORY_RESOURCE_HOB missing!\n");
	} else {
		fsp_reserved_bytes = fsp_memory->ResourceLength;
		printk(BIOS_DEBUG, "Reserving 0x%016lx bytes for FSP\n",
			(unsigned long int)fsp_reserved_bytes);
	}

	/* Display SMM area */
#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
	char *smm_base;
	size_t smm_size;

	smm_region((void **)&smm_base, &smm_size);
	printk(BIOS_DEBUG, "0x%08x: smm_size\n", (unsigned int)smm_size);
	printk(BIOS_DEBUG, "0x%p: smm_base\n", smm_base);
#endif

	/* Migrate CAR data */
	printk(BIOS_DEBUG, "0x%p: cbmem_top\n", cbmem_top());
	if (pei_ptr->boot_mode != SLEEP_STATE_S3) {
		cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
			fsp_reserved_bytes);
	} else if (cbmem_initialize_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
		fsp_reserved_bytes)) {
#if IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)
		printk(BIOS_DEBUG, "Failed to recover CBMEM in S3 resume.\n");
		/* Failed S3 resume, reset to come up cleanly */
		hard_reset();
#endif
	}

	/* Save the FSP runtime parameters. */
	fsp_set_runtime(fsp_header, hob_list_ptr);

	/* Lookup the FSP_BOOTLOADER_TOLUM_HOB */
	cbmem_root = get_next_resource_hob(&bootldr_tolum_guid, hob_list_ptr);
	if (cbmem_root == NULL) {
		fsp_verification_failure = 1;
		printk(BIOS_ERR, "7.4: FSP_BOOTLOADER_TOLUM_HOB missing!\n");
		printk(BIOS_ERR, "BootLoaderTolumSize: 0x%08x bytes\n",
			fsp_rt_common_buffer.BootLoaderTolumSize);
	}

	/* Locate the FSP_SMBIOS_MEMORY_INFO HOB */
	memory_info_hob = get_next_guid_hob(&memory_info_hob_guid,
		hob_list_ptr);
	if (NULL == memory_info_hob) {
		printk(BIOS_ERR, "FSP_SMBIOS_MEMORY_INFO HOB missing!\n");
		fsp_verification_failure = 1;
	} else {
		printk(BIOS_DEBUG,
			"FSP_SMBIOS_MEMORY_INFO HOB: 0x%p\n",
			memory_info_hob);
	}

#if IS_ENABLED(CONFIG_DISPLAY_HOBS)
	if (hob_list_ptr == NULL)
		die("ERROR - HOB pointer is NULL!\n");

	/*
	 * Verify that FSP is generating the required HOBs:
	 *	7.1: FSP_BOOTLOADER_TEMP_MEMORY_HOB only produced for FSP 1.0
	 *	7.2: FSP_RESERVED_MEMORY_RESOURCE_HOB verified above
	 *	7.3: FSP_NON_VOLATILE_STORAGE_HOB verified below
	 *	7.4: FSP_BOOTLOADER_TOLUM_HOB verified above
	 *	7.5: EFI_PEI_GRAPHICS_INFO_HOB produced by SiliconInit
	 *	FSP_SMBIOS_MEMORY_INFO HOB verified above
	 */
	if (NULL != cbmem_root) {
		printk(BIOS_DEBUG,
			"7.4: FSP_BOOTLOADER_TOLUM_HOB: 0x%p\n",
			cbmem_root);
		data = cbmem_root->PhysicalStart;
		printk(BIOS_DEBUG, "    0x%016lx: PhysicalStart\n", data);
		data = cbmem_root->ResourceLength;
		printk(BIOS_DEBUG, "    0x%016lx: ResourceLength\n", data);
	}
	hob_ptr.Raw = get_next_guid_hob(&mrc_guid, hob_list_ptr);
	if (NULL == hob_ptr.Raw) {
		printk(BIOS_ERR, "7.3: FSP_NON_VOLATILE_STORAGE_HOB missing!\n");
		fsp_verification_failure =
			(params->pei_data->saved_data == NULL) ? 1 : 0;
	} else {
		printk(BIOS_DEBUG,
			"7.3: FSP_NON_VOLATILE_STORAGE_HOB: 0x%p\n",
			hob_ptr.Raw);
	}
	if (fsp_memory != NULL) {
		printk(BIOS_DEBUG,
			"7.2: FSP_RESERVED_MEMORY_RESOURCE_HOB: 0x%p\n",
			fsp_memory);
		data = fsp_memory->PhysicalStart;
		printk(BIOS_DEBUG, "    0x%016lx: PhysicalStart\n", data);
		data = fsp_memory->ResourceLength;
		printk(BIOS_DEBUG, "    0x%016lx: ResourceLength\n", data);
	}

	/* Verify all the HOBs are present */
	if (fsp_verification_failure)
		printk(BIOS_DEBUG,
			"ERROR - Missing one or more required FSP HOBs!\n");

	/* Display the HOBs */
	print_hob_type_structure(0, hob_list_ptr);
#endif

	/* Get the address of the CBMEM region for the FSP reserved memory */
	fsp_reserved_memory_area = cbmem_find(CBMEM_ID_FSP_RESERVED_MEMORY);
	printk(BIOS_DEBUG, "0x%p: fsp_reserved_memory_area\n",
		fsp_reserved_memory_area);

	/* Verify the order of CBMEM root and FSP memory */
	if ((fsp_memory != NULL) && (cbmem_root != NULL) &&
		(cbmem_root->PhysicalStart <= fsp_memory->PhysicalStart)) {
		fsp_verification_failure = 1;
		printk(BIOS_DEBUG,
			"ERROR - FSP reserved memory above CBMEM root!\n");
	}

	/* Verify that the FSP memory was properly reserved */
	if ((fsp_memory != NULL) && ((fsp_reserved_memory_area == NULL) ||
		(fsp_memory->PhysicalStart !=
			(unsigned int)fsp_reserved_memory_area))) {
		fsp_verification_failure = 1;
		printk(BIOS_DEBUG, "ERROR - Reserving FSP memory area!\n");
#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
		if (cbmem_root != NULL) {
			size_t delta_bytes = (unsigned int)smm_base
				- cbmem_root->PhysicalStart
				- cbmem_root->ResourceLength;
			printk(BIOS_DEBUG,
				"0x%08x: Chipset reserved bytes reported by FSP\n",
				(unsigned int)delta_bytes);
			die("Please verify the chipset reserved size\n");
		}
#endif
	}

	/* Verify the FSP 1.1 HOB interface */
	if (fsp_verification_failure)
		die("ERROR - Coreboot's requirements not met by FSP binary!\n");

	/* Display the memory configuration */
	report_memory_config();

	/* Locate the memory configuration data to speed up the next reboot */
	mrc_hob = get_next_guid_hob(&mrc_guid, hob_list_ptr);
	if (mrc_hob == NULL)
		printk(BIOS_DEBUG,
			"Memory Configuration Data Hob not present\n");
	else {
		pei_ptr->data_to_save = GET_GUID_HOB_DATA(mrc_hob);
		pei_ptr->data_to_save_size = ALIGN(
			((u32)GET_HOB_LENGTH(mrc_hob)), 16);
	}
}
Example 19
asmlinkage void car_stage_entry(void)
{
	void *hob_list_ptr;
	const void *mrc_data;
	struct range_entry fsp_mem, reg_car;
	struct postcar_frame pcf;
	size_t  mrc_data_size;
	uintptr_t top_of_ram;
	int prev_sleep_state;
	struct romstage_handoff *handoff;
	struct chipset_power_state *ps = car_get_var_ptr(&power_state);

	timestamp_add_now(TS_START_ROMSTAGE);

	soc_early_romstage_init();
	disable_watchdog();

	console_init();

	prev_sleep_state = fill_power_state(ps);

	/* Make sure the blob does not override our data in CAR */
	range_entry_init(&reg_car, (uintptr_t)_car_relocatable_data_end,
			(uintptr_t)_car_region_end, 0);

	if (fsp_memory_init(&hob_list_ptr, &reg_car) != FSP_SUCCESS) {
		die("FSP memory init failed. Giving up.");
	}

	fsp_find_reserved_memory(&fsp_mem, hob_list_ptr);

	/* initialize cbmem by adding FSP reserved memory first thing */
	if (prev_sleep_state != SLEEP_STATE_S3) {
		cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
			range_entry_size(&fsp_mem));
	} else if (cbmem_initialize_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
				range_entry_size(&fsp_mem))) {
		if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
			printk(BIOS_DEBUG, "Failed to recover CBMEM in S3 resume.\n");
			/* Failed S3 resume, reset to come up cleanly */
			hard_reset();
		}
	}

	/* make sure FSP memory is reserved in cbmem */
	if (range_entry_base(&fsp_mem) !=
		(uintptr_t)cbmem_find(CBMEM_ID_FSP_RESERVED_MEMORY))
		die("Failed to accommodate FSP reserved memory request");

	/* Now that CBMEM is up, save the list so ramstage can use it */
	fsp_save_hob_list(hob_list_ptr);

	/* Save MRC Data to CBMEM */
	if (IS_ENABLED(CONFIG_CACHE_MRC_SETTINGS) &&
	    (prev_sleep_state != SLEEP_STATE_S3))
	{
		mrc_data = fsp_find_nv_storage_data(&mrc_data_size);
		if (mrc_data && mrc_cache_stash_data(mrc_data, mrc_data_size) < 0)
			printk(BIOS_ERR, "Failed to stash MRC data\n");
	}

	/* Create romstage handoff information */
	handoff = romstage_handoff_find_or_add();
	if (handoff != NULL)
		handoff->s3_resume = (prev_sleep_state == SLEEP_STATE_S3);
	else
		printk(BIOS_DEBUG, "Romstage handoff structure not added!\n");

	if (postcar_frame_init(&pcf, 1*KiB))
		die("Unable to initialize postcar frame.\n");

	/*
	 * We need to make sure ramstage will be run cached. At this point exact
	 * location of ramstage in cbmem is not known. Instruct postcar to cache
	 * 16 megs under cbmem top which is a safe bet to cover ramstage.
	 */
	top_of_ram = (uintptr_t) cbmem_top();
	/* cbmem_top() needs to be at least 16 MiB aligned */
	assert(ALIGN_DOWN(top_of_ram, 16*MiB) == top_of_ram);
	postcar_frame_add_mtrr(&pcf, top_of_ram - 16*MiB, 16*MiB, MTRR_TYPE_WRBACK);

	run_postcar_phase(&pcf);
}