static int set_timer_fsb(void) { struct cpuinfo_x86 c; int core_fsb[8] = { -1, 133, -1, 166, -1, 100, -1, -1 }; int core2_fsb[8] = { 266, 133, 200, 166, 333, 100, -1, -1 }; get_fms(&c, cpuid_eax(1)); if (c.x86 != 6) return -1; switch (c.x86_model) { case 0xe: /* Core Solo/Duo */ case 0x1c: /* Atom */ car_set_var(g_timer_fsb, core_fsb[rdmsr(MSR_FSB_FREQ).lo & 7]); break; case 0xf: /* Core 2 or Xeon */ case 0x17: /* Enhanced Core */ car_set_var(g_timer_fsb, core2_fsb[rdmsr(MSR_FSB_FREQ).lo & 7]); break; case 0x2a: /* SandyBridge BCLK fixed at 100MHz*/ case 0x3a: /* IvyBridge BCLK fixed at 100MHz*/ case 0x3c: /* Haswell BCLK fixed at 100MHz */ case 0x45: /* Haswell-ULT BCLK fixed at 100MHz */ car_set_var(g_timer_fsb, 100); break; default: car_set_var(g_timer_fsb, 200); break; } return 0; }
/* * Print out the contents of a buffer, if debug is enabled. Skip registers * other than FIFO, unless debug_level_ is 2. */ static void trace_dump(const char *prefix, uint32_t reg, size_t bytes, const uint8_t *buffer, int force) { static char prev_prefix CAR_GLOBAL; static unsigned prev_reg CAR_GLOBAL; static int current_char CAR_GLOBAL; const int BYTES_PER_LINE = 32; int *current_char_ptr = car_get_var_ptr(¤t_char); if (!force) { if (!debug_level_) return; if ((debug_level_ < 2) && (reg != TPM_DATA_FIFO_REG)) return; } /* * Do not print register address again if the last dump print was for * that register. */ if ((car_get_var(prev_prefix) != *prefix) || (car_get_var(prev_reg) != reg)) { car_set_var(prev_prefix, *prefix); car_set_var(prev_reg, reg); printk(BIOS_DEBUG, "\n%s %2.2x:", prefix, reg); *current_char_ptr = 0; } if ((reg != TPM_DATA_FIFO_REG) && (bytes == 4)) { /* * This must be a regular register address, print the 32 bit * value. */ printk(BIOS_DEBUG, " %8.8x", *(const uint32_t *)buffer); } else { int i; /* * Data read from or written to FIFO or not in 4 byte * quantiites is printed byte at a time. */ for (i = 0; i < bytes; i++) { if (*current_char_ptr && !(*current_char_ptr % BYTES_PER_LINE)) { printk(BIOS_DEBUG, "\n "); *current_char_ptr = 0; } (*current_char_ptr)++; printk(BIOS_DEBUG, " %2.2x", buffer[i]); } } }
static void vboot_prepare(void) { int run_verification; run_verification = verification_should_run(); if (run_verification) { verstage_main(); car_set_var(vboot_executed, 1); } else if (verstage_should_load()) { struct cbfsf file; struct prog verstage = PROG_INIT(PROG_VERSTAGE, CONFIG_CBFS_PREFIX "/verstage"); printk(BIOS_DEBUG, "VBOOT: Loading verstage.\n"); /* load verstage from RO */ if (cbfs_boot_locate(&file, prog_name(&verstage), NULL)) die("failed to load verstage"); cbfs_file_data(prog_rdev(&verstage), &file); if (cbfs_prog_stage_load(&verstage)) die("failed to load verstage"); /* verify and select a slot */ prog_run(&verstage); /* This is not actually possible to hit this condition at * runtime, but this provides a hint to the compiler for dead * code elimination below. */ if (!IS_ENABLED(CONFIG_RETURN_FROM_VERSTAGE)) return; car_set_var(vboot_executed, 1); } /* * Fill in vboot cbmem objects before moving to ramstage so all * downstream users have access to vboot results. This path only * applies to platforms employing VBOOT_DYNAMIC_WORK_BUFFER because * cbmem comes online prior to vboot verification taking place. For * other platforms the vboot cbmem objects are initialized when * cbmem comes online. */ if (ENV_ROMSTAGE && IS_ENABLED(CONFIG_VBOOT_DYNAMIC_WORK_BUFFER)) { vb2_store_selected_region(); vboot_fill_handoff(); } }
/*
 * Probe for the QEMU debug console: reading the port returns 0xe9 when the
 * debugcon device is present. Remember the result in a CAR variable.
 */
void qemu_debugcon_init(void)
{
	int found = inb(CONFIG_CONSOLE_QEMU_DEBUGCON_PORT) == 0xe9;

	car_set_var(qemu_debugcon_detected, found);
	printk(BIOS_INFO, "QEMU debugcon %s [port 0x%x]\n",
	       found ? "detected" : "not found",
	       CONFIG_CONSOLE_QEMU_DEBUGCON_PORT);
}
/* Initialize the console for this stage and print the coreboot banner. */
asmlinkage void console_init(void)
{
	init_log_level();

	/* When console-init debugging is enabled, mark the console live
	   early so messages emitted during init itself are printed. */
	if (CONFIG(DEBUG_CONSOLE_INIT))
		car_set_var(console_inited, 1);

	/* Bring up an early PCI bridge (e.g. for a PCI UART) before the
	   console hardware init; not applicable in SMM/ramstage. */
	if (CONFIG(EARLY_PCI_BRIDGE) && !ENV_SMM && !ENV_RAMSTAGE)
		pci_early_bridge_init();

	console_hw_init();

	car_set_var(console_inited, 1);

	printk(BIOS_NOTICE, "\n\ncoreboot-%s%s %s " ENV_STRING " starting (log level: %i)...\n",
	       coreboot_version, coreboot_extra_version, coreboot_build,
	       get_log_level());
}
/*
 * Fill in FSP-M UPDs before MemoryInit: console settings, SoC/mainboard
 * overrides, and the variable portion of the MRC cache. Also records the
 * FSP version in a CAR variable for later comparison.
 */
void platform_fsp_memory_init_params_cb(FSPM_UPD *mupd, uint32_t version)
{
	struct region_device rdev;

	check_full_retrain(mupd);

	fill_console_params(mupd);

	if (CONFIG(SOC_INTEL_GLK))
		soc_memory_init_params(mupd);

	mainboard_memory_init_params(mupd);

	parse_devicetree_setting(mupd);

	/* Do NOT let FSP do any GPIO pad configuration */
	mupd->FspmConfig.PreMemGpioTablePtr = (uintptr_t) NULL;

	/*
	 * Tell CSE we do not need to use Ring Buffer Protocol (RBP) to fetch
	 * firmware for us if we are using memory-mapped SPI. This lets CSE
	 * state machine transition to next boot state, so that it can function
	 * as designed.
	 */
	mupd->FspmConfig.SkipCseRbp = CONFIG(BOOT_DEVICE_MEMORY_MAPPED);

	/*
	 * Converged Security Engine (CSE) has secure storage functionality.
	 * HECI2 device can be used to access that functionality. However, part
	 * of S3 resume flow involves resetting HECI2 which takes 136ms. Since
	 * coreboot does not use secure storage functionality, instruct FSP to
	 * skip HECI2 reset.
	 */
	mupd->FspmConfig.EnableS3Heci2 = 0;

	/*
	 * Apollolake splits MRC cache into two parts: constant and variable.
	 * The constant part is not expected to change often and variable is.
	 * Currently variable part consists of parameters that change on cold
	 * boots such as scrambler seed and some memory controller registers.
	 * Scrambler seed is vital for S3 resume case because attempt to use
	 * wrong/missing key renders DRAM contents useless.
	 */
	if (mrc_cache_get_current(MRC_VARIABLE_DATA, version, &rdev) == 0) {
		/* Assume leaking is ok. */
		assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));
		mupd->FspmConfig.VariableNvsBufferPtr = rdev_mmap_full(&rdev);
	}

	car_set_var(fsp_version, version);
}
/*
 * Weak default for boards that do not wire up the Cr50 IRQ: warn once per
 * stage, then fall back to a fixed 10ms delay and report "ready".
 */
__weak int tis_plat_irq_status(void)
{
	static int warning_displayed CAR_GLOBAL;

	if (car_get_var(warning_displayed) == 0) {
		printk(BIOS_WARNING, "WARNING: tis_plat_irq_status() not implemented, wasting 10ms to wait on Cr50!\n");
		car_set_var(warning_displayed, 1);
	}
	mdelay(10);

	return 1;
}
/* Probe the boot SPI flash once and cache the handle in the sfg CAR var. */
static void boot_device_rw_init(void)
{
	const int spi_bus = CONFIG_BOOT_DEVICE_SPI_FLASH_BUS;
	const int chip_select = 0;

	/* Already probed in this stage? */
	if (car_get_var(sfg) != NULL)
		return;

	/* Ensure any necessary setup is performed by the drivers. */
	spi_init();

	car_set_var(sfg, spi_flash_probe(spi_bus, chip_select));
}
/* Mark the TPM interface closed. Always succeeds. */
int tis_close(void)
{
	if (!car_get_var(tpm_is_open))
		return 0;

	/*
	 * Do we need to do something here, like waiting for a
	 * transaction to stop?
	 */
	car_set_var(tpm_is_open, 0);

	return 0;
}
/* Probe the boot SPI flash once; latch success in sfg_init_done. */
static void boot_device_rw_init(void)
{
	const int spi_bus = CONFIG_BOOT_DEVICE_SPI_FLASH_BUS;
	const int chip_select = 0;

	if (car_get_var(sfg_init_done) == true)
		return;

	/* Ensure any necessary setup is performed by the drivers. */
	spi_init();

	/* Only mark init done when the probe actually succeeded. */
	if (!spi_flash_probe(spi_bus, chip_select, car_get_var_ptr(&sfg)))
		car_set_var(sfg_init_done, true);
}
/*
 * Fill in FSP-M UPDs before MemoryInit: console settings, mainboard
 * overrides, and the variable portion of the MRC cache. Records the FSP
 * version in a CAR variable for later comparison.
 */
void platform_fsp_memory_init_params_cb(FSPM_UPD *mupd, uint32_t version)
{
	const struct mrc_saved_data *msd;

	fill_console_params(mupd);
	mainboard_memory_init_params(mupd);

	/* Do NOT let FSP do any GPIO pad configuration */
	mupd->FspmConfig.PreMemGpioTablePtr = (uintptr_t) NULL;

	/*
	 * Tell CSE we do not need to use Ring Buffer Protocol (RBP) to fetch
	 * firmware for us if we are using memory-mapped SPI. This lets CSE
	 * state machine transition to next boot state, so that it can function
	 * as designed.
	 */
	mupd->FspmConfig.SkipCseRbp =
		IS_ENABLED(CONFIG_BOOT_DEVICE_MEMORY_MAPPED);

	/*
	 * Converged Security Engine (CSE) has secure storage functionality.
	 * HECI2 device can be used to access that functionality. However, part
	 * of S3 resume flow involves resetting HECI2 which takes 136ms. Since
	 * coreboot does not use secure storage functionality, instruct FSP to
	 * skip HECI2 reset.
	 */
	mupd->FspmConfig.EnableS3Heci2 = 0;

	/*
	 * Apollolake splits MRC cache into two parts: constant and variable.
	 * The constant part is not expected to change often and variable is.
	 * Currently variable part consists of parameters that change on cold
	 * boots such as scrambler seed and some memory controller registers.
	 * Scrambler seed is vital for S3 resume case because attempt to use
	 * wrong/missing key renders DRAM contents useless.
	 */
	if (mrc_cache_get_vardata(&msd, version) < 0) {
		printk(BIOS_DEBUG, "MRC variable data missing/invalid\n");
	} else {
		mupd->FspmConfig.VariableNvsBufferPtr = (void*) msd->data;
	}

	car_set_var(fsp_version, version);
}
static int marshal_hierarchy_control(struct obuf *ob, struct tpm2_hierarchy_control_cmd *command_body) { int rc = 0; struct tpm2_session_header session_header; car_set_var(tpm_tag, TPM_ST_SESSIONS); rc |= marshal_TPM_HANDLE(ob, TPM_RH_PLATFORM); memset(&session_header, 0, sizeof(session_header)); session_header.session_handle = TPM_RS_PW; rc |= marshal_session_header(ob, &session_header); rc |= marshal_TPM_HANDLE(ob, command_body->enable); rc |= obuf_write_be8(ob, command_body->state); return rc; }
/* * Common session header can include one or two handles and an empty * session_header structure. */ static int marshal_common_session_header(struct obuf *ob, const uint32_t *handles, size_t handle_count) { size_t i; struct tpm2_session_header session_header; int rc = 0; car_set_var(tpm_tag, TPM_ST_SESSIONS); for (i = 0; i < handle_count; i++) rc |= marshal_TPM_HANDLE(ob, handles[i]); memset(&session_header, 0, sizeof(session_header)); session_header.session_handle = TPM_RS_PW; rc |= marshal_session_header(ob, &session_header); return rc; }
/*
 * Look for a known Oxford PCIe UART at bus/dev, map it at the fixed early
 * MMIO base and enable memory decoding.
 *
 * Returns 0 when a supported UART was found and set up, -1 otherwise.
 */
int pci_early_device_probe(u8 bus, u8 dev, u32 mmio_base)
{
	pci_devfn_t pdev = PCI_DEV(bus, dev, 0);
	u32 vid_did = pci_read_config32(pdev, PCI_VENDOR_ID);
	u16 cmd;

	switch (vid_did) {
	case 0xc1181415: /* e.g. Startech PEX1S1PMINI function 0 */
		/*
		 * On this device function 0 is the parallel port, and
		 * function 3 is the serial port. So let's go look for
		 * the UART.
		 */
		pdev = PCI_DEV(bus, dev, 3);
		vid_did = pci_read_config32(pdev, PCI_VENDOR_ID);
		if (vid_did != 0xc11b1415)
			return -1;
		break;
	case 0xc11b1415: /* e.g. Startech PEX1S1PMINI function 3 */
	case 0xc1581415: /* e.g. Startech MPEX2S952 */
		break;
	default:
		/* No UART here. */
		return -1;
	}

	/* Sanity-check, we assume fixed location. */
	if (mmio_base != CONFIG_EARLY_PCI_MMIO_BASE)
		return -1;

	/* Setup base address on device */
	pci_write_config32(pdev, PCI_BASE_ADDRESS_0, mmio_base);

	/* Enable memory on device */
	cmd = pci_read_config16(pdev, PCI_COMMAND);
	cmd |= PCI_COMMAND_MEMORY;
	pci_write_config16(pdev, PCI_COMMAND, cmd);

	car_set_var(oxpcie_present, 1);
	return 0;
}
/*
 * Serialize a TPM2 command into the output buffer: write a placeholder
 * header, marshal the command-specific body, then patch the header with
 * the final tag and total size.
 *
 * Returns 0 on success, non-zero on marshaling failure or unsupported
 * command.
 */
int tpm_marshal_command(TPM_CC command, void *tpm_command_body, struct obuf *ob)
{
	struct obuf header_buf;
	const size_t header_size = sizeof(uint16_t) + 2 * sizeof(uint32_t);
	int ret = 0;

	/* Default tag; body marshalers override it when they add sessions. */
	car_set_var(tpm_tag, TPM_ST_NO_SESSIONS);

	/* Keep a window over the header so it can be rewritten later. */
	if (obuf_splice_current(ob, &header_buf, header_size) < 0)
		return -1;

	/* Write TPM command header with placeholder field values. */
	ret |= obuf_write_be16(ob, 0);
	ret |= obuf_write_be32(ob, 0);
	ret |= obuf_write_be32(ob, command);
	if (ret != 0)
		return ret;

	switch (command) {
	case TPM2_Startup:
		ret |= marshal_startup(ob, tpm_command_body);
		break;
	case TPM2_Shutdown:
		ret |= marshal_shutdown(ob, tpm_command_body);
		break;
	case TPM2_GetCapability:
		ret |= marshal_get_capability(ob, tpm_command_body);
		break;
	case TPM2_NV_Read:
		ret |= marshal_nv_read(ob, tpm_command_body);
		break;
	case TPM2_NV_DefineSpace:
		ret |= marshal_nv_define_space(ob, tpm_command_body);
		break;
	case TPM2_NV_Write:
		ret |= marshal_nv_write(ob, tpm_command_body);
		break;
	case TPM2_NV_WriteLock:
		ret |= marshal_nv_write_lock(ob, tpm_command_body);
		break;
	case TPM2_SelfTest:
		ret |= marshal_selftest(ob, tpm_command_body);
		break;
	case TPM2_Hierarchy_Control:
		ret |= marshal_hierarchy_control(ob, tpm_command_body);
		break;
	case TPM2_Clear:
		ret |= marshal_clear(ob);
		break;
	case TPM2_PCR_Extend:
		ret |= marshal_pcr_extend(ob, tpm_command_body);
		break;
	case TPM2_CR50_VENDOR_COMMAND:
		ret |= marshal_cr50_vendor_command(ob, tpm_command_body);
		break;
	default:
		printk(BIOS_INFO, "%s:%d:Request to marshal unsupported command %#x\n",
		       __FILE__, __LINE__, command);
		ret = -1;
	}

	if (ret != 0)
		return ret;

	/* Fix up the command header with known values. */
	ret |= obuf_write_be16(&header_buf, car_get_var(tpm_tag));
	ret |= obuf_write_be32(&header_buf, obuf_nr_written(ob));

	return ret;
}
/* Calibrate the TSC once per stage; later calls are no-ops. */
void init_timer(void)
{
	if (car_get_var(clocks_per_usec) == 0)
		car_set_var(clocks_per_usec, calibrate_tsc());
}
/*
 * Run or load vboot verification for the current stage, then populate the
 * vboot cbmem handoff when applicable.
 */
static void vboot_prepare(void)
{
	if (verification_should_run()) {
		/* Note: this path is not used for VBOOT_RETURN_FROM_VERSTAGE */
		verstage_main();
		car_set_var(vboot_executed, 1);
		vb2_save_recovery_reason_vbnv();

		/*
		 * Avoid double memory retrain when the EC is running RW code
		 * and a recovery request came in through an EC host event. The
		 * double retrain happens because the EC won't be rebooted
		 * until kernel verification notices the EC isn't running RO
		 * code which is after memory training. Therefore, reboot the
		 * EC after we've saved the potential recovery request so it's
		 * not lost. Lastly, only perform this sequence on x86
		 * platforms since those are the ones that currently do a
		 * costly memory training in recovery mode.
		 */
		if (IS_ENABLED(CONFIG_EC_GOOGLE_CHROMEEC) &&
		    IS_ENABLED(CONFIG_ARCH_X86))
			google_chromeec_early_init();
	} else if (verstage_should_load()) {
		struct cbfsf file;
		struct prog verstage =
			PROG_INIT(PROG_VERSTAGE,
				  CONFIG_CBFS_PREFIX "/verstage");

		printk(BIOS_DEBUG, "VBOOT: Loading verstage.\n");

		/* load verstage from RO */
		if (cbfs_boot_locate(&file, prog_name(&verstage), NULL))
			die("failed to load verstage");

		cbfs_file_data(prog_rdev(&verstage), &file);

		if (cbfs_prog_stage_load(&verstage))
			die("failed to load verstage");

		/* verify and select a slot */
		prog_run(&verstage);

		/* This is not actually possible to hit this condition at
		 * runtime, but this provides a hint to the compiler for dead
		 * code elimination below. */
		if (!IS_ENABLED(CONFIG_VBOOT_RETURN_FROM_VERSTAGE))
			return;

		car_set_var(vboot_executed, 1);
	}

	/*
	 * Fill in vboot cbmem objects before moving to ramstage so all
	 * downstream users have access to vboot results. This path only
	 * applies to platforms employing VBOOT_STARTS_IN_ROMSTAGE because
	 * cbmem comes online prior to vboot verification taking place. For
	 * other platforms the vboot cbmem objects are initialized when
	 * cbmem comes online.
	 */
	if (ENV_ROMSTAGE && IS_ENABLED(CONFIG_VBOOT_STARTS_IN_ROMSTAGE)) {
		vb2_store_selected_region();
		vboot_fill_handoff();
	}
}
/*
 * Each TPM2 SPI transaction starts the same: CS is asserted, the 4 byte
 * header is sent to the TPM, the master waits til TPM is ready to continue.
 *
 * Returns 1 on success, 0 on failure (TPM SPI flow control timeout.)
 */
static int start_transaction(int read_write, size_t bytes, unsigned addr)
{
	spi_frame_header header;
	uint8_t byte;
	int i;
	struct stopwatch sw;
	static int tpm_sync_needed CAR_GLOBAL;
	static struct stopwatch wake_up_sw CAR_GLOBAL;
	struct spi_slave *spi_slave = car_get_var_ptr(&g_spi_slave);

	/*
	 * First Cr50 access in each coreboot stage where TPM is used will be
	 * prepended by a wake up pulse on the CS line.
	 */
	int wakeup_needed = 1;

	/* Wait for TPM to finish previous transaction if needed */
	if (car_get_var(tpm_sync_needed)) {
		tpm_sync();
		/*
		 * During the first invocation of this function on each stage
		 * this if () clause code does not run (as tpm_sync_needed
		 * value is zero), during all following invocations the
		 * stopwatch below is guaranteed to be started.
		 */
		if (!stopwatch_expired(car_get_var_ptr(&wake_up_sw)))
			wakeup_needed = 0;
	} else {
		car_set_var(tpm_sync_needed, 1);
	}

	if (wakeup_needed) {
		/* Just in case Cr50 is asleep. */
		spi_claim_bus(spi_slave);
		udelay(1);
		spi_release_bus(spi_slave);
		udelay(100);
	}

	/*
	 * The Cr50 on H1 does not go to sleep for 1 second after any
	 * SPI slave activity, let's be conservative and limit the
	 * window to 900 ms.
	 */
	stopwatch_init_msecs_expire(car_get_var_ptr(&wake_up_sw), 900);

	/*
	 * The first byte of the frame header encodes the transaction type
	 * (read or write) and transfer size (set to length - 1), limited to
	 * 64 bytes.
	 */
	header.body[0] = (read_write ? 0x80 : 0) | 0x40 | (bytes - 1);

	/* The rest of the frame header is the TPM register address. */
	for (i = 0; i < 3; i++)
		header.body[i + 1] = (addr >> (8 * (2 - i))) & 0xff;

	/* CS assert wakes up the slave. */
	spi_claim_bus(spi_slave);

	/*
	 * The TCG TPM over SPI specification introduces the notion of SPI
	 * flow control (Section "6.4.5 Flow Control").
	 *
	 * Again, the slave (TPM device) expects each transaction to start
	 * with a 4 byte header transmitted by master. The header indicates if
	 * the master needs to read or write a register, and the register
	 * address.
	 *
	 * If the slave needs to stall the transaction (for instance it is not
	 * ready to send the register value to the master), it sets the MOSI
	 * line to 0 during the last clock of the 4 byte header. In this case
	 * the master is supposed to start polling the SPI bus, one byte at
	 * time, until the last bit in the received byte (transferred during
	 * the last clock of the byte) is set to 1.
	 *
	 * Due to some SPI controllers' shortcomings (Rockchip comes to
	 * mind...) we transmit the 4 byte header without checking the byte
	 * transmitted by the TPM during the transaction's last byte.
	 *
	 * We know that cr50 is guaranteed to set the flow control bit to 0
	 * during the header transfer, but real TPM2 might be fast enough not
	 * to require to stall the master, this would present an issue.
	 * crosbug.com/p/52132 has been opened to track this.
	 */
	spi_xfer(spi_slave, header.body, sizeof(header.body), NULL, 0);

	/*
	 * Now poll the bus until TPM removes the stall bit. Give it up to 100
	 * ms to sort it out - it could be saving stuff in nvram at some
	 * point.
	 */
	stopwatch_init_msecs_expire(&sw, 100);
	do {
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "TPM flow control failure\n");
			spi_release_bus(spi_slave);
			return 0;
		}
		spi_xfer(spi_slave, NULL, 0, &byte, 1);
	} while (!(byte & 1));

	return 1;
}