static int map_madt_entry(int type, u32 acpi_id)
{
	unsigned long madt_end, entry;
	static struct acpi_table_madt *madt;
	static int read_madt;
	int phys_id = -1;	/* CPU hardware ID */

	if (!read_madt) {
		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
					(struct acpi_table_header **)&madt)))
			madt = NULL;
		read_madt++;
	}

	if (!madt)
		return phys_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */
	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;

		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (!map_lapic_id(header, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
			if (!map_x2apic_id(header, type, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (!map_lsapic_id(header, type, acpi_id, &phys_id))
				break;
		}
		entry += header->length;
	}

	return phys_id;
}
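/*
 * Editorial sketch, not part of the snippet above: how a caller might use
 * map_madt_entry() to resolve an ACPI processor ID to a hardware ID, falling
 * back to a namespace (_MAT) lookup when the MADT scan misses. The fallback
 * helper name map_mat_entry() is an assumption for illustration only.
 */
static int example_map_cpu_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
	int phys_id;

	/* Prefer the static MADT table, which needs no namespace access. */
	phys_id = map_madt_entry(type, acpi_id);
	if (phys_id == -1)
		phys_id = map_mat_entry(handle, type, acpi_id);

	return phys_id;
}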
static ssize_t acpi_system_read_dsdt(struct file *file, char __user *buffer,
				     size_t count, loff_t *ppos)
{
	acpi_status status = AE_OK;
	struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
	ssize_t res;

	ACPI_FUNCTION_TRACE("acpi_system_read_dsdt");

	status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt);
	if (ACPI_FAILURE(status))
		return_VALUE(-ENODEV);

	res = simple_read_from_buffer(buffer, count, ppos,
				      dsdt.pointer, dsdt.length);
	acpi_os_free(dsdt.pointer);

	return_VALUE(res);
}
void __init acpi_hest_init(void)
{
	acpi_status status;
	int rc = -ENODEV;
	unsigned int ghes_count = 0;

	if (hest_disable) {
		pr_info(HEST_PFX "Table parsing disabled.\n");
		return;
	}

	status = acpi_get_table(ACPI_SIG_HEST, 0,
				(struct acpi_table_header **)&hest_tab);
	if (status == AE_NOT_FOUND)
		goto err;
	else if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err(HEST_PFX "Failed to get table, %s\n", msg);
		rc = -EINVAL;
		goto err;
	}

	if (!ghes_disable) {
		rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
		if (rc)
			goto err;
		rc = hest_ghes_dev_register(ghes_count);
		if (rc)
			goto err;
	}

	pr_info(HEST_PFX "Table parsing has been initialized.\n");
	return;
err:
	hest_disable = 1;
}
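/*
 * Editorial sketch, not from the file above: what a counting callback for
 * apei_hest_parse() (such as the hest_parse_ghes_count referenced there)
 * might look like, assuming the usual APEI callback signature
 * int (*)(struct acpi_hest_header *hest_hdr, void *data). It bumps the
 * counter passed via @data for every generic error source entry.
 */
static int __init example_count_ghes(struct acpi_hest_header *hest_hdr,
				     void *data)
{
	unsigned int *count = data;

	if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR)
		(*count)++;

	return 0;
}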
static int crb_acpi_add(struct acpi_device *device)
{
	struct tpm_chip *chip;
	struct acpi_tpm2 *buf;
	struct crb_priv *priv;
	struct device *dev = &device->dev;
	acpi_status status;
	u32 sm;
	u64 pa;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to get TPM2 ACPI table\n");
		return -ENODEV;
	}

	/* Should the FIFO driver handle this? */
	if (buf->start_method == TPM2_START_FIFO)
		return -ENODEV;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->flags = TPM_CHIP_FLAG_TPM2;

	if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
		dev_err(dev, "TPM2 ACPI table has wrong size");
		return -EINVAL;
	}

	priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv),
						GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to devm_kzalloc for private data\n");
		return -ENOMEM;
	}

	sm = le32_to_cpu(buf->start_method);

	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
	 * report only ACPI start but in practice seems to require both
	 * ACPI start and CRB start.
	 */
	if (sm == TPM2_START_CRB || sm == TPM2_START_FIFO ||
	    !strcmp(acpi_device_hid(device), "MSFT0101"))
		priv->flags |= CRB_FL_CRB_START;

	if (sm == TPM2_START_ACPI || sm == TPM2_START_CRB_WITH_ACPI)
		priv->flags |= CRB_FL_ACPI_START;

	priv->cca = (struct crb_control_area __iomem *)
		devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000);
	if (!priv->cca) {
		dev_err(dev, "ioremap of the control area failed\n");
		return -ENOMEM;
	}

	pa = ((u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_high)) << 32) |
		(u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_low));
	priv->cmd = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->cmd_size));
	if (!priv->cmd) {
		dev_err(dev, "ioremap of the command buffer failed\n");
		return -ENOMEM;
	}

	memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
	pa = le64_to_cpu(pa);
	priv->rsp = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->rsp_size));
	if (!priv->rsp) {
		dev_err(dev, "ioremap of the response buffer failed\n");
		return -ENOMEM;
	}

	chip->vendor.priv = priv;

	/* Default timeouts and durations */
	chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
	chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
	chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
	chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
	chip->vendor.duration[TPM_SHORT] =
		msecs_to_jiffies(TPM2_DURATION_SHORT);
	chip->vendor.duration[TPM_MEDIUM] =
		msecs_to_jiffies(TPM2_DURATION_MEDIUM);
	chip->vendor.duration[TPM_LONG] =
		msecs_to_jiffies(TPM2_DURATION_LONG);

	chip->acpi_dev_handle = device->handle;

	rc = tpm2_do_selftest(chip);
	if (rc)
		return rc;

	return tpm_chip_register(chip);
}
static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
				 struct resource *io_res, u64 start, u32 size)
{
	struct resource new_res = {
		.start	= start,
		.end	= start + size - 1,
		.flags	= IORESOURCE_MEM,
	};

	/* Detect a 64 bit address on a 32 bit system */
	if (start != new_res.start)
		return (void __iomem *) ERR_PTR(-EINVAL);

	if (!resource_contains(io_res, &new_res))
		return devm_ioremap_resource(dev, &new_res);

	return priv->iobase + (new_res.start - io_res->start);
}

static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
		      struct acpi_table_tpm2 *buf)
{
	struct list_head resources;
	struct resource io_res;
	struct device *dev = &device->dev;
	u64 cmd_pa;
	u32 cmd_size;
	u64 rsp_pa;
	u32 rsp_size;
	int ret;

	INIT_LIST_HEAD(&resources);
	ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
				     &io_res);
	if (ret < 0)
		return ret;
	acpi_dev_free_resource_list(&resources);

	if (resource_type(&io_res) != IORESOURCE_MEM) {
		dev_err(dev,
			FW_BUG "TPM2 ACPI table does not define a memory resource\n");
		return -EINVAL;
	}

	priv->iobase = devm_ioremap_resource(dev, &io_res);
	if (IS_ERR(priv->iobase))
		return PTR_ERR(priv->iobase);

	priv->cca = crb_map_res(dev, priv, &io_res, buf->control_address,
				sizeof(struct crb_control_area));
	if (IS_ERR(priv->cca))
		return PTR_ERR(priv->cca);

	cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
		  (u64) ioread32(&priv->cca->cmd_pa_low);
	cmd_size = ioread32(&priv->cca->cmd_size);
	priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
	if (IS_ERR(priv->cmd))
		return PTR_ERR(priv->cmd);

	memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
	rsp_pa = le64_to_cpu(rsp_pa);
	rsp_size = ioread32(&priv->cca->rsp_size);

	if (cmd_pa != rsp_pa) {
		priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
		return PTR_ERR_OR_ZERO(priv->rsp);
	}

	/* According to the PTP specification, overlapping command and response
	 * buffer sizes must be identical.
	 */
	if (cmd_size != rsp_size) {
		dev_err(dev,
			FW_BUG "overlapping command and response buffer sizes are not identical");
		return -EINVAL;
	}

	priv->rsp = priv->cmd;
	return 0;
}

static int crb_acpi_add(struct acpi_device *device)
{
	struct acpi_table_tpm2 *buf;
	struct crb_priv *priv;
	struct device *dev = &device->dev;
	acpi_status status;
	u32 sm;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
		return -EINVAL;
	}

	/* Should the FIFO driver handle this? */
	sm = buf->start_method;
	if (sm == ACPI_TPM2_MEMORY_MAPPED)
		return -ENODEV;

	priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
	 * report only ACPI start but in practice seems to require both
	 * ACPI start and CRB start.
	 */
	if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
	    !strcmp(acpi_device_hid(device), "MSFT0101"))
		priv->flags |= CRB_FL_CRB_START;

	if (sm == ACPI_TPM2_START_METHOD ||
	    sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
		priv->flags |= CRB_FL_ACPI_START;

	rc = crb_map_io(device, priv, buf);
	if (rc)
		return rc;

	return crb_init(device, priv);
}

static int crb_acpi_remove(struct acpi_device *device)
{
	struct device *dev = &device->dev;
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_chip_unregister(chip);

	return 0;
}

static struct acpi_device_id crb_device_ids[] = {
	{"MSFT0101", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, crb_device_ids);

static struct acpi_driver crb_acpi_driver = {
	.name = "tpm_crb",
	.ids = crb_device_ids,
	.ops = {
		.add = crb_acpi_add,
		.remove = crb_acpi_remove,
	},
	.drv = {
		.pm = &crb_pm,
	},
};
/**
 * parse_spcr() - parse ACPI SPCR table and add preferred console
 *
 * @earlycon: set up earlycon for the console specified by the table
 *
 * For the architectures with support for ACPI, CONFIG_ACPI_SPCR_TABLE may be
 * defined to parse ACPI SPCR table. As a result of the parsing preferred
 * console is registered and if @earlycon is true, earlycon is set up.
 *
 * When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called
 * from arch initialization code as soon as the DT/ACPI decision is made.
 *
 */
int __init parse_spcr(bool earlycon)
{
	static char opts[64];
	struct acpi_table_spcr *table;
	acpi_status status;
	char *uart;
	char *iotype;
	int baud_rate;
	int err;

	if (acpi_disabled)
		return -ENODEV;

	status = acpi_get_table(ACPI_SIG_SPCR, 0,
				(struct acpi_table_header **)&table);
	if (ACPI_FAILURE(status))
		return -ENOENT;

	if (table->header.revision < 2) {
		err = -ENOENT;
		pr_err("wrong table version\n");
		goto done;
	}

	if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		switch (table->serial_port.access_width) {
		default:
			pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
		case ACPI_ACCESS_SIZE_BYTE:
			iotype = "mmio";
			break;
		case ACPI_ACCESS_SIZE_WORD:
			iotype = "mmio16";
			break;
		case ACPI_ACCESS_SIZE_DWORD:
			iotype = "mmio32";
			break;
		}
	} else
		iotype = "io";

	switch (table->interface_type) {
	case ACPI_DBG2_ARM_SBSA_32BIT:
		iotype = "mmio32";
		/* fall through */
	case ACPI_DBG2_ARM_PL011:
	case ACPI_DBG2_ARM_SBSA_GENERIC:
	case ACPI_DBG2_BCM2835:
		uart = "pl011";
		break;
	case ACPI_DBG2_16550_COMPATIBLE:
	case ACPI_DBG2_16550_SUBSET:
		uart = "uart";
		break;
	default:
		err = -ENOENT;
		goto done;
	}

	switch (table->baud_rate) {
	case 3:
		baud_rate = 9600;
		break;
	case 4:
		baud_rate = 19200;
		break;
	case 6:
		baud_rate = 57600;
		break;
	case 7:
		baud_rate = 115200;
		break;
	default:
		err = -ENOENT;
		goto done;
	}

	if (qdf2400_erratum_44_present(&table->header))
		uart = "qdf2400_e44";

	if (xgene_8250_erratum_present(table))
		iotype = "mmio32";

	snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
		 table->serial_port.address, baud_rate);

	pr_info("console: %s\n", opts);

	if (earlycon)
		setup_earlycon(opts);

	err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);

done:
	acpi_put_table((struct acpi_table_header *)table);
	return err;
}
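/*
 * Editorial sketch: the kernel-doc above says parse_spcr() is meant to be
 * called from arch initialization once the DT/ACPI decision is known. A
 * minimal, hypothetical call site could look like this; the function name
 * and the earlycon policy are assumptions, not taken from the snippet.
 */
static void __init example_spcr_console_init(bool want_earlycon)
{
	int err;

	if (acpi_disabled)
		return;		/* DT case: console comes from stdout-path */

	err = parse_spcr(want_earlycon);
	if (err)
		pr_debug("No usable SPCR console (%d)\n", err);
}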
void __init acpi_early_init(void)
{
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { sizeof(acpi_fadt), &acpi_fadt };

	ACPI_FUNCTION_TRACE("acpi_early_init");

	if (acpi_disabled)
		return_VOID;

	/* enable workarounds, unless strict ACPI spec. compliance */
	if (!acpi_strict)
		acpi_gbl_enable_interpreter_slack = TRUE;

	status = acpi_initialize_subsystem();
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX
		       "Unable to initialize the ACPI Interpreter\n");
		goto error0;
	}

	status = acpi_load_tables();
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX
		       "Unable to load the System Description Tables\n");
		goto error0;
	}

	/*
	 * Get a separate copy of the FADT for use by other drivers.
	 */
	status = acpi_get_table(ACPI_TABLE_FADT, 1, &buffer);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Unable to get the FADT\n");
		goto error0;
	}

#ifdef CONFIG_X86
	if (!acpi_ioapic) {
		extern acpi_interrupt_flags acpi_sci_flags;

		/* compatible (0) means level (3) */
		if (acpi_sci_flags.trigger == 0)
			acpi_sci_flags.trigger = 3;

		/* Set PIC-mode SCI trigger type */
		acpi_pic_sci_set_trigger(acpi_fadt.sci_int,
					 acpi_sci_flags.trigger);
	} else {
		extern int acpi_sci_override_gsi;
		/*
		 * now that acpi_fadt is initialized,
		 * update it with result from INT_SRC_OVR parsing
		 */
		acpi_fadt.sci_int = acpi_sci_override_gsi;
	}
#endif

	status = acpi_enable_subsystem(~(ACPI_NO_HARDWARE_INIT |
					 ACPI_NO_ACPI_ENABLE));
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
		goto error0;
	}

	return_VOID;

error0:
	disable_acpi();
	return_VOID;
}
static int __init erst_init(void)
{
	int rc = 0;
	acpi_status status;
	struct apei_exec_context ctx;
	struct apei_resources erst_resources;
	struct resource *r;

	if (acpi_disabled)
		goto err;

	if (erst_disable) {
		pr_info(ERST_PFX
			"Error Record Serialization Table (ERST) support is disabled.\n");
		goto err;
	}

	status = acpi_get_table(ACPI_SIG_ERST, 0,
				(struct acpi_table_header **)&erst_tab);
	if (status == AE_NOT_FOUND) {
		pr_info(ERST_PFX "Table is not found!\n");
		goto err;
	} else if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err(ERST_PFX "Failed to get table, %s\n", msg);
		rc = -EINVAL;
		goto err;
	}

	rc = erst_check_table(erst_tab);
	if (rc) {
		pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
		goto err;
	}

	apei_resources_init(&erst_resources);
	erst_exec_ctx_init(&ctx);
	rc = apei_exec_collect_resources(&ctx, &erst_resources);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&erst_resources, "APEI ERST");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(&ctx);
	if (rc)
		goto err_release;
	rc = erst_get_erange(&erst_erange);
	if (rc) {
		if (rc == -ENODEV)
			pr_info(ERST_PFX
				"The corresponding hardware device or firmware implementation "
				"is not available.\n");
		else
			pr_err(ERST_PFX
			       "Failed to get Error Log Address Range.\n");
		goto err_unmap_reg;
	}

	r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
	if (!r) {
		pr_err(ERST_PFX
		       "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
		       (unsigned long long)erst_erange.base,
		       (unsigned long long)erst_erange.base + erst_erange.size);
		rc = -EIO;
		goto err_unmap_reg;
	}
	rc = -ENOMEM;
	erst_erange.vaddr = ioremap_cache(erst_erange.base, erst_erange.size);
	if (!erst_erange.vaddr)
		goto err_release_erange;

	pr_info(ERST_PFX
		"Error Record Serialization Table (ERST) support is initialized.\n");

	return 0;

err_release_erange:
	release_mem_region(erst_erange.base, erst_erange.size);
err_unmap_reg:
	apei_exec_post_unmap_gars(&ctx);
err_release:
	apei_resources_release(&erst_resources);
err_fini:
	apei_resources_fini(&erst_resources);
err:
	erst_disable = 1;
	return rc;
}
static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
{
    struct acpi_table_madt *madt;
    struct acpi_table_header *table;
    struct acpi_madt_io_apic *io_apic;
    struct acpi_madt_local_x2apic *x2apic;
    acpi_status status;
    unsigned long size;
    unsigned int i, max_vcpus;
    int rc;

    /* Count number of interrupt overrides in the MADT. */
    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
                          acpi_count_intr_ovr, UINT_MAX);

    /* Count number of NMI sources in the MADT. */
    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_count_nmi_src,
                          UINT_MAX);

    max_vcpus = dom0_max_vcpus();
    /* Calculate the size of the crafted MADT. */
    size = sizeof(*madt);
    size += sizeof(*io_apic) * nr_ioapics;
    size += sizeof(*intsrcovr) * acpi_intr_overrides;
    size += sizeof(*nmisrc) * acpi_nmi_sources;
    size += sizeof(*x2apic) * max_vcpus;

    madt = xzalloc_bytes(size);
    if ( !madt )
    {
        printk("Unable to allocate memory for MADT table\n");
        rc = -ENOMEM;
        goto out;
    }

    /* Copy the native MADT table header. */
    status = acpi_get_table(ACPI_SIG_MADT, 0, &table);
    if ( !ACPI_SUCCESS(status) )
    {
        printk("Failed to get MADT ACPI table, aborting.\n");
        rc = -EINVAL;
        goto out;
    }
    madt->header = *table;
    madt->address = APIC_DEFAULT_PHYS_BASE;
    /*
     * NB: this is currently set to 4, which is the revision in the ACPI
     * spec 6.1. Sadly ACPICA doesn't provide revision numbers for the
     * tables described in the headers.
     */
    madt->header.revision = min_t(unsigned char, table->revision, 4);

    /* Setup the IO APIC entries. */
    io_apic = (void *)(madt + 1);
    for ( i = 0; i < nr_ioapics; i++ )
    {
        io_apic->header.type = ACPI_MADT_TYPE_IO_APIC;
        io_apic->header.length = sizeof(*io_apic);
        io_apic->id = domain_vioapic(d, i)->id;
        io_apic->address = domain_vioapic(d, i)->base_address;
        io_apic->global_irq_base = domain_vioapic(d, i)->base_gsi;
        io_apic++;
    }

    x2apic = (void *)io_apic;
    for ( i = 0; i < max_vcpus; i++ )
    {
        x2apic->header.type = ACPI_MADT_TYPE_LOCAL_X2APIC;
        x2apic->header.length = sizeof(*x2apic);
        x2apic->uid = i;
        x2apic->local_apic_id = i * 2;
        x2apic->lapic_flags = ACPI_MADT_ENABLED;
        x2apic++;
    }

    /* Setup interrupt overrides. */
    intsrcovr = (void *)x2apic;
    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_set_intr_ovr,
                          acpi_intr_overrides);

    /* Setup NMI sources. */
    nmisrc = (void *)intsrcovr;
    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_set_nmi_src,
                          acpi_nmi_sources);

    ASSERT(((void *)nmisrc - (void *)madt) == size);
    madt->header.length = size;
    /*
     * Calling acpi_tb_checksum here is a layering violation, but
     * introducing a wrapper for such simple usage seems overkill.
     */
    madt->header.checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, madt), size);

    /* Place the new MADT in guest memory space. */
    if ( pvh_steal_ram(d, size, 0, GB(4), addr) )
    {
        printk("Unable to find allocate guest RAM for MADT\n");
        rc = -ENOMEM;
        goto out;
    }

    /* Mark this region as E820_ACPI. */
    if ( pvh_add_mem_range(d, *addr, *addr + size, E820_ACPI) )
        printk("Unable to add MADT region to memory map\n");

    rc = hvm_copy_to_guest_phys(*addr, madt, size, d->vcpu[0]);
    if ( rc )
    {
        printk("Unable to copy MADT into guest memory\n");
        goto out;
    }

    rc = 0;

 out:
    xfree(madt);

    return rc;
}
/*
 * This function is used to initialize the context with right values. In this
 * method, we can make all the detection we want, and modify the asus_laptop
 * struct
 */
static int asus_laptop_get_info(struct asus_laptop *asus)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *model = NULL;
	unsigned long long bsts_result, hwrs_result;
	char *string = NULL;
	acpi_status status;

	/*
	 * Get DSDT headers early enough to allow for differentiating between
	 * models, but late enough to allow acpi_bus_register_driver() to fail
	 * before doing anything ACPI-specific. Should we encounter a machine,
	 * which needs special handling (i.e. its hotkey device has a different
	 * HID), this bit will be moved.
	 */
	status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
	if (ACPI_FAILURE(status))
		pr_warning("Couldn't get the DSDT table header\n");

	/* We have to write 0 on init this far for all ASUS models */
	if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
		pr_err("Hotkey initialization failed\n");
		return -ENODEV;
	}

	/* This needs to be called for some laptops to init properly */
	status = acpi_evaluate_integer(asus->handle, "BSTS", NULL,
				       &bsts_result);
	if (ACPI_FAILURE(status))
		pr_warning("Error calling BSTS\n");
	else if (bsts_result)
		pr_notice("BSTS called, 0x%02x returned\n", (uint) bsts_result);

	/* This too ... */
	if (write_acpi_int(asus->handle, "CWAP", wapf))
		pr_err("Error calling CWAP(%d)\n", wapf);

	/*
	 * Try to match the object returned by INIT to the specific model.
	 * Handle every possible object (or the lack of thereof) the DSDT
	 * writers might throw at us. When in trouble, we pass NULL to
	 * asus_model_match() and try something completely different.
	 */
	if (buffer.pointer) {
		model = buffer.pointer;
		switch (model->type) {
		case ACPI_TYPE_STRING:
			string = model->string.pointer;
			break;
		case ACPI_TYPE_BUFFER:
			string = model->buffer.pointer;
			break;
		default:
			string = "";
			break;
		}
	}
	asus->name = kstrdup(string, GFP_KERNEL);
	if (!asus->name) {
		kfree(buffer.pointer);
		return -ENOMEM;
	}

	if (*string)
		pr_notice(" %s model detected\n", string);

	/*
	 * The HWRS method return informations about the hardware.
	 * 0x80 bit is for WLAN, 0x100 for Bluetooth.
	 * The significance of others is yet to be found.
	 */
	status = acpi_evaluate_integer(asus->handle, "HRWS", NULL,
				       &hwrs_result);
	if (!ACPI_FAILURE(status))
		pr_notice(" HRWS returned %x", (int)hwrs_result);

	if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
		asus->have_rsts = true;

	/* Scheduled for removal */
	ASUS_HANDLE_INIT(lcd_switch);
	ASUS_HANDLE_INIT(display_get);

	kfree(model);

	return AE_OK;
}
void __init efi_bgrt_init(void)
{
	acpi_status status;
	void *image;
	struct bmp_header bmp_header;

	if (acpi_disabled)
		return;

	status = acpi_get_table("BGRT", 0,
				(struct acpi_table_header **)&bgrt_tab);
	if (ACPI_FAILURE(status))
		return;

	if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
		pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
		       bgrt_tab->header.length, sizeof(*bgrt_tab));
		return;
	}
	if (bgrt_tab->version != 1) {
		pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
		       bgrt_tab->version);
		return;
	}
	if (bgrt_tab->status & 0xfe) {
		pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
		       bgrt_tab->status);
		return;
	}
	if (bgrt_tab->status != 1) {
		pr_debug("Ignoring BGRT: invalid status %u (expected 1)\n",
			 bgrt_tab->status);
		return;
	}
	if (bgrt_tab->image_type != 0) {
		pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
		       bgrt_tab->image_type);
		return;
	}
	if (!bgrt_tab->image_address) {
		pr_err("Ignoring BGRT: null image address\n");
		return;
	}

	image = memremap(bgrt_tab->image_address, sizeof(bmp_header),
			 MEMREMAP_WB);
	if (!image) {
		pr_err("Ignoring BGRT: failed to map image header memory\n");
		return;
	}

	memcpy(&bmp_header, image, sizeof(bmp_header));
	memunmap(image);
	bgrt_image_size = bmp_header.size;

	bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
	if (!bgrt_image) {
		pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
		       bgrt_image_size);
		return;
	}

	image = memremap(bgrt_tab->image_address, bmp_header.size,
			 MEMREMAP_WB);
	if (!image) {
		pr_err("Ignoring BGRT: failed to map image memory\n");
		kfree(bgrt_image);
		bgrt_image = NULL;
		return;
	}

	memcpy(bgrt_image, image, bgrt_image_size);
	memunmap(image);
}
static int asus_laptop_get_info(struct asus_laptop *asus)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *model = NULL;
	unsigned long long bsts_result, hwrs_result;
	char *string = NULL;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
	if (ACPI_FAILURE(status))
		pr_warn("Couldn't get the DSDT table header\n");

	if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
		pr_err("Hotkey initialization failed\n");
		return -ENODEV;
	}

	status = acpi_evaluate_integer(asus->handle, "BSTS", NULL,
				       &bsts_result);
	if (ACPI_FAILURE(status))
		pr_warn("Error calling BSTS\n");
	else if (bsts_result)
		pr_notice("BSTS called, 0x%02x returned\n", (uint) bsts_result);

	if (write_acpi_int(asus->handle, "CWAP", wapf))
		pr_err("Error calling CWAP(%d)\n", wapf);

	if (buffer.pointer) {
		model = buffer.pointer;
		switch (model->type) {
		case ACPI_TYPE_STRING:
			string = model->string.pointer;
			break;
		case ACPI_TYPE_BUFFER:
			string = model->buffer.pointer;
			break;
		default:
			string = "";
			break;
		}
	}
	asus->name = kstrdup(string, GFP_KERNEL);
	if (!asus->name) {
		kfree(buffer.pointer);
		return -ENOMEM;
	}

	if (*string)
		pr_notice(" %s model detected\n", string);

	status = acpi_evaluate_integer(asus->handle, "HWRS", NULL,
				       &hwrs_result);
	if (!ACPI_FAILURE(status))
		pr_notice(" HWRS returned %x", (int)hwrs_result);

	if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
		asus->have_rsts = true;

	kfree(model);

	return AE_OK;
}
/**
 * parse_spcr() - parse ACPI SPCR table and add preferred console
 *
 * @earlycon: set up earlycon for the console specified by the table
 *
 * For the architectures with support for ACPI, CONFIG_ACPI_SPCR_TABLE may be
 * defined to parse ACPI SPCR table. As a result of the parsing preferred
 * console is registered and if @earlycon is true, earlycon is set up.
 *
 * When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called
 * from arch initialization code as soon as the DT/ACPI decision is made.
 *
 */
int __init parse_spcr(bool earlycon)
{
	static char opts[64];
	struct acpi_table_spcr *table;
	acpi_status status;
	char *uart;
	char *iotype;
	int baud_rate;
	int err;

	if (acpi_disabled)
		return -ENODEV;

	status = acpi_get_table(ACPI_SIG_SPCR, 0,
				(struct acpi_table_header **)&table);
	if (ACPI_FAILURE(status))
		return -ENOENT;

	if (table->header.revision < 2) {
		err = -ENOENT;
		pr_err("wrong table version\n");
		goto done;
	}

	if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		switch (table->serial_port.access_width) {
		default:
			pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
		case ACPI_ACCESS_SIZE_BYTE:
			iotype = "mmio";
			break;
		case ACPI_ACCESS_SIZE_WORD:
			iotype = "mmio16";
			break;
		case ACPI_ACCESS_SIZE_DWORD:
			iotype = "mmio32";
			break;
		}
	} else
		iotype = "io";

	switch (table->interface_type) {
	case ACPI_DBG2_ARM_SBSA_32BIT:
		iotype = "mmio32";
		/* fall through */
	case ACPI_DBG2_ARM_PL011:
	case ACPI_DBG2_ARM_SBSA_GENERIC:
	case ACPI_DBG2_BCM2835:
		uart = "pl011";
		break;
	case ACPI_DBG2_16550_COMPATIBLE:
	case ACPI_DBG2_16550_SUBSET:
		uart = "uart";
		break;
	default:
		err = -ENOENT;
		goto done;
	}

	switch (table->baud_rate) {
	case 3:
		baud_rate = 9600;
		break;
	case 4:
		baud_rate = 19200;
		break;
	case 6:
		baud_rate = 57600;
		break;
	case 7:
		baud_rate = 115200;
		break;
	default:
		err = -ENOENT;
		goto done;
	}

	/*
	 * If the E44 erratum is required, then we need to tell the pl011
	 * driver to implement the work-around.
	 *
	 * The global variable is used by the probe function when it
	 * creates the UARTs, whether or not they're used as a console.
	 *
	 * If the user specifies "traditional" earlycon, the qdf2400_e44
	 * console name matches the EARLYCON_DECLARE() statement, and
	 * SPCR is not used. Parameter "earlycon" is false.
	 *
	 * If the user specifies "SPCR" earlycon, then we need to update
	 * the console name so that it also says "qdf2400_e44". Parameter
	 * "earlycon" is true.
	 *
	 * For consistency, if we change the console name, then we do it
	 * for everyone, not just earlycon.
	 */
	if (qdf2400_erratum_44_present(&table->header)) {
		qdf2400_e44_present = true;
		if (earlycon)
			uart = "qdf2400_e44";
	}

	if (xgene_8250_erratum_present(table))
		iotype = "mmio32";

	snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
		 table->serial_port.address, baud_rate);

	pr_info("console: %s\n", opts);

	if (earlycon)
		setup_earlycon(opts);

	err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);

done:
	acpi_put_table((struct acpi_table_header *)table);
	return err;
}
static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
				 struct resource *io_res, u64 start, u32 size)
{
	struct resource new_res = {
		.start	= start,
		.end	= start + size - 1,
		.flags	= IORESOURCE_MEM,
	};

	/* Detect a 64 bit address on a 32 bit system */
	if (start != new_res.start)
		return (void __iomem *) ERR_PTR(-EINVAL);

	if (!resource_contains(io_res, &new_res))
		return devm_ioremap_resource(dev, &new_res);

	return priv->iobase + (new_res.start - io_res->start);
}

/*
 * Work around broken BIOSs that return inconsistent values from the ACPI
 * region vs the registers. Trust the ACPI region. Such broken systems
 * probably cannot send large TPM commands since the buffer will be truncated.
 */
static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
			      u64 start, u64 size)
{
	if (io_res->start > start || io_res->end < start)
		return size;

	if (start + size - 1 <= io_res->end)
		return size;

	dev_err(dev,
		FW_BUG "ACPI region does not cover the entire command/response buffer. %pr vs %llx %llx\n",
		io_res, start, size);

	return io_res->end - start + 1;
}

static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
		      struct acpi_table_tpm2 *buf)
{
	struct list_head resources;
	struct resource io_res;
	struct device *dev = &device->dev;
	u32 pa_high, pa_low;
	u64 cmd_pa;
	u32 cmd_size;
	__le64 __rsp_pa;
	u64 rsp_pa;
	u32 rsp_size;
	int ret;

	INIT_LIST_HEAD(&resources);
	ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
				     &io_res);
	if (ret < 0)
		return ret;
	acpi_dev_free_resource_list(&resources);

	if (resource_type(&io_res) != IORESOURCE_MEM) {
		dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
		return -EINVAL;
	}

	priv->iobase = devm_ioremap_resource(dev, &io_res);
	if (IS_ERR(priv->iobase))
		return PTR_ERR(priv->iobase);

	/* The ACPI IO region starts at the head area and continues to include
	 * the control area, as one nice sane region except for some older
	 * stuff that puts the control area outside the ACPI IO region.
	 */
	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
		if (buf->control_address == io_res.start +
		    sizeof(*priv->regs_h))
			priv->regs_h = priv->iobase;
		else
			dev_warn(dev, FW_BUG "Bad ACPI memory layout");
	}

	ret = __crb_request_locality(dev, priv, 0);
	if (ret)
		return ret;

	priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
				   sizeof(struct crb_regs_tail));
	if (IS_ERR(priv->regs_t))
		return PTR_ERR(priv->regs_t);

	/*
	 * PTT HW bug w/a: wake up the device to access
	 * possibly not retained registers.
	 */
	ret = crb_cmd_ready(dev, priv);
	if (ret)
		return ret;

	pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
	pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
	cmd_pa = ((u64)pa_high << 32) | pa_low;
	cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
				      ioread32(&priv->regs_t->ctrl_cmd_size));

	dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
		pa_high, pa_low, cmd_size);

	priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
	if (IS_ERR(priv->cmd)) {
		ret = PTR_ERR(priv->cmd);
		goto out;
	}

	memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
	rsp_pa = le64_to_cpu(__rsp_pa);
	rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
				      ioread32(&priv->regs_t->ctrl_rsp_size));

	if (cmd_pa != rsp_pa) {
		priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
		ret = PTR_ERR_OR_ZERO(priv->rsp);
		goto out;
	}

	/* According to the PTP specification, overlapping command and response
	 * buffer sizes must be identical.
	 */
	if (cmd_size != rsp_size) {
		dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
		ret = -EINVAL;
		goto out;
	}

	priv->rsp = priv->cmd;

out:
	if (!ret)
		priv->cmd_size = cmd_size;

	crb_go_idle(dev, priv);

	__crb_relinquish_locality(dev, priv, 0);

	return ret;
}

static int crb_acpi_add(struct acpi_device *device)
{
	struct acpi_table_tpm2 *buf;
	struct crb_priv *priv;
	struct tpm_chip *chip;
	struct device *dev = &device->dev;
	struct tpm2_crb_smc *crb_smc;
	acpi_status status;
	u32 sm;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
		return -EINVAL;
	}

	/* Should the FIFO driver handle this? */
	sm = buf->start_method;
	if (sm == ACPI_TPM2_MEMORY_MAPPED)
		return -ENODEV;

	priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
			dev_err(dev,
				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
				buf->header.length,
				ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
			return -EINVAL;
		}
		crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
		priv->smc_func_id = crb_smc->smc_func_id;
	}

	priv->sm = sm;
	priv->hid = acpi_device_hid(device);

	rc = crb_map_io(device, priv, buf);
	if (rc)
		return rc;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	dev_set_drvdata(&chip->dev, priv);
	chip->acpi_dev_handle = device->handle;
	chip->flags = TPM_CHIP_FLAG_TPM2;

	rc = __crb_request_locality(dev, priv, 0);
	if (rc)
		return rc;

	rc = crb_cmd_ready(dev, priv);
	if (rc)
		goto out;

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	rc = tpm_chip_register(chip);
	if (rc) {
		crb_go_idle(dev, priv);
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		goto out;
	}

	pm_runtime_put_sync(dev);

out:
	__crb_relinquish_locality(dev, priv, 0);

	return rc;
}

static int crb_acpi_remove(struct acpi_device *device)
{
	struct device *dev = &device->dev;
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_chip_unregister(chip);

	pm_runtime_disable(dev);

	return 0;
}

static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return crb_go_idle(dev, priv);
}

static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return crb_cmd_ready(dev, priv);
}

static int __maybe_unused crb_pm_suspend(struct device *dev)
{
	int ret;

	ret = tpm_pm_suspend(dev);
	if (ret)
		return ret;

	return crb_pm_runtime_suspend(dev);
}
/* read binary bios log */
int read_log(struct tpm_bios_log *log)
{
	struct acpi_tcpa *buff;
	acpi_status status;
	void __iomem *virt;
	u64 len, start;

	if (log->bios_event_log != NULL) {
		printk(KERN_ERR
		       "%s: ERROR - Eventlog already initialized\n",
		       __func__);
		return -EFAULT;
	}

	/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
	status = acpi_get_table(ACPI_SIG_TCPA, 1,
				(struct acpi_table_header **)&buff);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
		       __func__);
		return -EIO;
	}

	switch (buff->platform_class) {
	case BIOS_SERVER:
		len = buff->server.log_max_len;
		start = buff->server.log_start_addr;
		break;
	case BIOS_CLIENT:
	default:
		len = buff->client.log_max_len;
		start = buff->client.log_start_addr;
		break;
	}
	if (!len) {
		printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
		return -EIO;
	}

	/* malloc EventLog space */
	log->bios_event_log = kmalloc(len, GFP_KERNEL);
	if (!log->bios_event_log) {
		printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
		       __func__);
		return -ENOMEM;
	}

	log->bios_event_log_end = log->bios_event_log + len;

	virt = acpi_os_map_iomem(start, len);
	if (!virt) {
		kfree(log->bios_event_log);
		printk("%s: ERROR - Unable to map memory\n", __func__);
		return -EIO;
	}

	memcpy_fromio(log->bios_event_log, virt, len);

	acpi_os_unmap_iomem(virt, len);
	return 0;
}
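/*
 * Editorial sketch (hypothetical caller, not part of the snippet above):
 * how a consumer might pull in the firmware event log once. The field names
 * bios_event_log/bios_event_log_end come from read_log(); everything else is
 * assumed for illustration.
 */
static void *example_open_bios_log(struct tpm_bios_log *log)
{
	if (read_log(log))
		return NULL;	/* no TCPA table, or mapping failed */

	/*
	 * log->bios_event_log .. log->bios_event_log_end now holds the raw
	 * binary measurement log copied out of firmware-reserved memory.
	 */
	return log->bios_event_log;
}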