static int i2c_atmel_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct tpm_chip *chip;
	struct device *dev = &client->dev;
	struct priv_data *priv;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	chip = tpmm_chip_alloc(dev, &i2c_atmel);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Default timeouts */
	chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
	chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
	chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
	chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);

	dev_set_drvdata(&chip->dev, priv);

	/* There is no known way to probe for this device, and all version
	 * information seems to be read via TPM commands. Thus we rely on the
	 * TPM startup process in the common code to detect the device.
	 */
	return tpm_chip_register(chip);
}
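/*
 * A minimal sketch of how a probe routine like i2c_atmel_probe() is
 * typically hooked up to the I2C core. The device-id string and driver
 * name below are illustrative assumptions, not taken from the original
 * source; a real driver would also supply a .remove callback that calls
 * tpm_chip_unregister().
 */
static const struct i2c_device_id i2c_atmel_id[] = {
	{ "at97sc3204t", 0 },	/* hypothetical id string */
	{ }
};
MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);

static struct i2c_driver i2c_atmel_driver = {
	.id_table = i2c_atmel_id,
	.probe = i2c_atmel_probe,
	.driver = {
		.name = "tpm_i2c_atmel",	/* assumed name */
	},
};

module_i2c_driver(i2c_atmel_driver);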
static int crb_init(struct acpi_device *device, struct crb_priv *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	dev_set_drvdata(&chip->dev, priv);
	chip->acpi_dev_handle = device->handle;
	chip->flags = TPM_CHIP_FLAG_TPM2;

	return tpm_chip_register(chip);
}
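/*
 * The private data attached in crb_init() above is retrieved in the ops
 * callbacks from the chip's embedded struct device, the same pattern used
 * by crb_pm_runtime_suspend() further below. A minimal sketch; the callback
 * name my_crb_status() is hypothetical.
 */
static u8 my_crb_status(struct tpm_chip *chip)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	/* ... read the status from registers mapped in priv ... */
	return 0;
}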
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, &tpm_vtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	init_waitqueue_head(&priv->read_queue);
	priv->chip = chip;
	dev_set_drvdata(&chip->dev, priv);

	return 0;
}
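/*
 * Variant of the same setup_chip() helper: here the private data is kept in
 * chip->vendor and reached through the older TPM_VPRIV() accessor instead of
 * the device drvdata used above.
 */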
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, &tpm_vtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	init_waitqueue_head(&chip->vendor.read_queue);
	priv->chip = chip;
	TPM_VPRIV(chip) = priv;

	return 0;
}
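/*
 * The probe paths in this file all share the same core skeleton:
 * tpmm_chip_alloc() binds a chip to the parent device, the driver-private
 * state is attached to chip->dev, and tpm_chip_register() publishes the
 * device. Below is a minimal sketch of that skeleton. The names my_priv,
 * my_tpm_ops, my_tpm_send(), my_tpm_recv() and my_tpm_probe() are
 * hypothetical, the includes assume the sketch lives under drivers/char/tpm,
 * and the tpm_class_ops callback signatures are assumed to match the era of
 * the surrounding code.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "tpm.h"

struct my_priv {
	int dummy;	/* transport-specific state would live here */
};

static int my_tpm_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	/* push 'len' bytes of 'buf' to the device */
	return 0;
}

static int my_tpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	/* pull a response into 'buf' and return its length */
	return 0;
}

static const struct tpm_class_ops my_tpm_ops = {
	.send = my_tpm_send,
	.recv = my_tpm_recv,
	/* a real driver also supplies .status, .cancel, .req_* fields */
};

static int my_tpm_probe(struct device *dev)
{
	struct tpm_chip *chip;
	struct my_priv *priv;

	chip = tpmm_chip_alloc(dev, &my_tpm_ops);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(&chip->dev, priv);

	return tpm_chip_register(chip);
}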
static int crb_acpi_add(struct acpi_device *device)
{
	struct tpm_chip *chip;
	struct acpi_tpm2 *buf;
	struct crb_priv *priv;
	struct device *dev = &device->dev;
	acpi_status status;
	u32 sm;
	u64 pa;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to get TPM2 ACPI table\n");
		return -ENODEV;
	}

	/* Should the FIFO driver handle this? */
	if (buf->start_method == TPM2_START_FIFO)
		return -ENODEV;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->flags = TPM_CHIP_FLAG_TPM2;

	if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
		dev_err(dev, "TPM2 ACPI table has wrong size");
		return -EINVAL;
	}

	priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv),
						GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to devm_kzalloc for private data\n");
		return -ENOMEM;
	}

	sm = le32_to_cpu(buf->start_method);

	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
	 * reports only ACPI start but in practice seems to require both
	 * ACPI start and CRB start.
	 */
	if (sm == TPM2_START_CRB || sm == TPM2_START_FIFO ||
	    !strcmp(acpi_device_hid(device), "MSFT0101"))
		priv->flags |= CRB_FL_CRB_START;

	if (sm == TPM2_START_ACPI || sm == TPM2_START_CRB_WITH_ACPI)
		priv->flags |= CRB_FL_ACPI_START;

	priv->cca = (struct crb_control_area __iomem *)
		devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000);
	if (!priv->cca) {
		dev_err(dev, "ioremap of the control area failed\n");
		return -ENOMEM;
	}

	pa = ((u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_high)) << 32) |
		(u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_low));
	priv->cmd = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->cmd_size));
	if (!priv->cmd) {
		dev_err(dev, "ioremap of the command buffer failed\n");
		return -ENOMEM;
	}

	memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
	pa = le64_to_cpu(pa);
	priv->rsp = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->rsp_size));
	if (!priv->rsp) {
		dev_err(dev, "ioremap of the response buffer failed\n");
		return -ENOMEM;
	}

	chip->vendor.priv = priv;

	/* Default timeouts and durations */
	chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
	chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
	chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
	chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
	chip->vendor.duration[TPM_SHORT] =
		msecs_to_jiffies(TPM2_DURATION_SHORT);
	chip->vendor.duration[TPM_MEDIUM] =
		msecs_to_jiffies(TPM2_DURATION_MEDIUM);
	chip->vendor.duration[TPM_LONG] =
		msecs_to_jiffies(TPM2_DURATION_LONG);

	chip->acpi_dev_handle = device->handle;

	rc = tpm2_do_selftest(chip);
	if (rc)
		return rc;

	return tpm_chip_register(chip);
}
/*
 * st33zp24_probe() initializes the TPM device.
 * @param: phy_id, the phy-specific private handle, passed back to @ops.
 * @param: ops, the phy read/write operations (struct st33zp24_phy_ops).
 * @param: dev, the device the TPM is attached to.
 * @param: irq, the interrupt line, or 0 when interrupts are not used.
 * @param: io_lpcpd, the lpcpd I/O line.
 * @return: 0 in case of success, a negative error code otherwise.
 */
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
		   struct device *dev, int irq, int io_lpcpd)
{
	int ret;
	u8 intmask = 0;
	struct tpm_chip *chip;
	struct st33zp24_dev *tpm_dev;

	chip = tpmm_chip_alloc(dev, &st33zp24_tpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	tpm_dev = devm_kzalloc(dev, sizeof(struct st33zp24_dev),
			       GFP_KERNEL);
	if (!tpm_dev)
		return -ENOMEM;

	tpm_dev->phy_id = phy_id;
	tpm_dev->ops = ops;
	dev_set_drvdata(&chip->dev, tpm_dev);

	chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	tpm_dev->locality = LOCALITY0;

	if (irq) {
		/* INTERRUPT Setup */
		init_waitqueue_head(&tpm_dev->read_queue);
		tpm_dev->intrs = 0;

		if (request_locality(chip) != LOCALITY0) {
			ret = -ENODEV;
			goto _tpm_clean_answer;
		}

		clear_interruption(tpm_dev);
		ret = devm_request_irq(dev, irq, tpm_ioserirq_handler,
				       IRQF_TRIGGER_HIGH,
				       "TPM SERIRQ management", chip);
		if (ret < 0) {
			dev_err(&chip->dev, "TPM SERIRQ signals %d not available\n",
				irq);
			goto _tpm_clean_answer;
		}

		intmask |= TPM_INTF_CMD_READY_INT
			|  TPM_INTF_STS_VALID_INT
			|  TPM_INTF_DATA_AVAIL_INT;

		ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_ENABLE,
					 &intmask, 1);
		if (ret < 0)
			goto _tpm_clean_answer;

		intmask = TPM_GLOBAL_INT_ENABLE;
		ret = tpm_dev->ops->send(tpm_dev->phy_id, (TPM_INT_ENABLE + 3),
					 &intmask, 1);
		if (ret < 0)
			goto _tpm_clean_answer;

		tpm_dev->irq = irq;
		chip->flags |= TPM_CHIP_FLAG_IRQ;

		disable_irq_nosync(tpm_dev->irq);
	}

	return tpm_chip_register(chip);
_tpm_clean_answer:
	dev_info(&chip->dev, "TPM initialization failed\n");
	return ret;
}
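/*
 * Sketch of how a bus-specific phy layer would feed st33zp24_probe(). The
 * names my_phy, my_phy_send(), my_phy_recv() and my_phy_probe() are
 * hypothetical; only the send() shape is taken from the calls visible above
 * (phy handle, TPM register, buffer, length), and the recv() prototype is
 * assumed to mirror it.
 */
struct my_phy {
	struct i2c_client *client;
};

static int my_phy_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
		       int tpm_size)
{
	/* write tpm_size bytes of tpm_data to tpm_register over the bus */
	return 0;
}

static int my_phy_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
		       int tpm_size)
{
	/* read up to tpm_size bytes from tpm_register over the bus */
	return 0;
}

static const struct st33zp24_phy_ops my_phy_ops = {
	.send = my_phy_send,
	.recv = my_phy_recv,
};

static int my_phy_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct my_phy *phy;

	phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	phy->client = client;

	/* hand the transport over to the common st33zp24 core */
	return st33zp24_probe(phy, &my_phy_ops, &client->dev, client->irq,
			      0 /* io_lpcpd not wired in this sketch */);
}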
static int __init init_nsc(void)
{
	int rc = 0;
	int lo, hi, err;
	int nscAddrBase = TPM_ADDR;
	struct tpm_chip *chip;
	unsigned long base;
	struct tpm_nsc_priv *priv;

	/* verify that it is a National part (SID) */
	if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
		nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C) << 8) |
			(tpm_read_index(TPM_SUPERIO_ADDR, 0x2B) & 0xFE);
		if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6)
			return -ENODEV;
	}

	err = platform_driver_register(&nsc_drv);
	if (err)
		return err;

	hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
	lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
	base = (hi << 8) | lo;

	/* enable the DPM module */
	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);

	pdev = platform_device_alloc("tpm_nscl0", -1);
	if (!pdev) {
		rc = -ENOMEM;
		goto err_unreg_drv;
	}

	pdev->num_resources = 0;
	pdev->dev.driver = &nsc_drv.driver;
	pdev->dev.release = tpm_nsc_remove;

	if ((rc = platform_device_add(pdev)) < 0)
		goto err_put_dev;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		rc = -ENOMEM;
		goto err_del_dev;
	}

	priv->base = base;

	if (request_region(base, 2, "tpm_nsc0") == NULL) {
		rc = -EBUSY;
		goto err_del_dev;
	}

	chip = tpmm_chip_alloc(&pdev->dev, &tpm_nsc);
	if (IS_ERR(chip)) {
		rc = -ENODEV;
		goto err_rel_reg;
	}

	dev_set_drvdata(&chip->dev, priv);

	rc = tpm_chip_register(chip);
	if (rc)
		goto err_rel_reg;

	dev_dbg(&pdev->dev, "NSC TPM detected\n");
	dev_dbg(&pdev->dev,
		"NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
		tpm_read_index(nscAddrBase, 0x07),
		tpm_read_index(nscAddrBase, 0x20),
		tpm_read_index(nscAddrBase, 0x27));
	dev_dbg(&pdev->dev,
		"NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
		tpm_read_index(nscAddrBase, 0x21),
		tpm_read_index(nscAddrBase, 0x25),
		tpm_read_index(nscAddrBase, 0x26),
		tpm_read_index(nscAddrBase, 0x28));
	dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n",
		(tpm_read_index(nscAddrBase, 0x60) << 8) |
		tpm_read_index(nscAddrBase, 0x61));
	dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n",
		(tpm_read_index(nscAddrBase, 0x62) << 8) |
		tpm_read_index(nscAddrBase, 0x63));
	dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n",
		tpm_read_index(nscAddrBase, 0x70));
	dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n",
		tpm_read_index(nscAddrBase, 0x71));
	dev_dbg(&pdev->dev,
		"NSC DMA channel select0 0x%x, select1 0x%x\n",
		tpm_read_index(nscAddrBase, 0x74),
		tpm_read_index(nscAddrBase, 0x75));
	dev_dbg(&pdev->dev,
		"NSC Config "
		"0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		tpm_read_index(nscAddrBase, 0xF0),
		tpm_read_index(nscAddrBase, 0xF1),
		tpm_read_index(nscAddrBase, 0xF2),
		tpm_read_index(nscAddrBase, 0xF3),
		tpm_read_index(nscAddrBase, 0xF4),
		tpm_read_index(nscAddrBase, 0xF5),
		tpm_read_index(nscAddrBase, 0xF6),
		tpm_read_index(nscAddrBase, 0xF7),
		tpm_read_index(nscAddrBase, 0xF8),
		tpm_read_index(nscAddrBase, 0xF9));

	dev_info(&pdev->dev,
		 "NSC TPM revision %d\n",
		 tpm_read_index(nscAddrBase, 0x27) & 0x1F);

	return 0;

err_rel_reg:
	release_region(base, 2);
err_del_dev:
	platform_device_del(pdev);
err_put_dev:
	platform_device_put(pdev);
err_unreg_drv:
	platform_driver_unregister(&nsc_drv);
	return rc;
}
/*
 * tpm_stm_i2c_probe() initializes the TPM device.
 * @param: client, the i2c_client description (TPM I2C description).
 * @param: id, the i2c_device_id struct.
 * @return: 0 in case of success, a negative error code otherwise.
 */
static int
tpm_stm_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	int ret;
	u8 intmask = 0;
	struct tpm_chip *chip;
	struct st33zp24_platform_data *platform_data;
	struct tpm_stm_dev *tpm_dev;

	if (!client) {
		pr_info("%s: i2c client is NULL. Device not accessible.\n",
			__func__);
		return -ENODEV;
	}

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_info(&client->dev, "client not i2c capable\n");
		return -ENODEV;
	}

	tpm_dev = devm_kzalloc(&client->dev, sizeof(struct tpm_stm_dev),
			       GFP_KERNEL);
	if (!tpm_dev)
		return -ENOMEM;

	chip = tpmm_chip_alloc(&client->dev, &st_i2c_tpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	TPM_VPRIV(chip) = tpm_dev;
	tpm_dev->client = client;

	platform_data = client->dev.platform_data;
	if (!platform_data && client->dev.of_node) {
		ret = tpm_stm_i2c_of_request_resources(chip);
		if (ret)
			goto _tpm_clean_answer;
	} else if (platform_data) {
		ret = tpm_stm_i2c_request_resources(client, chip);
		if (ret)
			goto _tpm_clean_answer;
	}

	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	chip->vendor.locality = LOCALITY0;

	if (client->irq) {
		/* INTERRUPT Setup */
		init_waitqueue_head(&chip->vendor.read_queue);
		tpm_dev->intrs = 0;

		if (request_locality(chip) != LOCALITY0) {
			ret = -ENODEV;
			goto _tpm_clean_answer;
		}

		clear_interruption(tpm_dev);
		ret = devm_request_irq(&client->dev, client->irq,
				       tpm_ioserirq_handler,
				       IRQF_TRIGGER_HIGH,
				       "TPM SERIRQ management", chip);
		if (ret < 0) {
			dev_err(chip->pdev, "TPM SERIRQ signals %d not available\n",
				client->irq);
			goto _tpm_clean_answer;
		}

		intmask |= TPM_INTF_CMD_READY_INT
			|  TPM_INTF_STS_VALID_INT
			|  TPM_INTF_DATA_AVAIL_INT;

		ret = I2C_WRITE_DATA(tpm_dev, TPM_INT_ENABLE, &intmask, 1);
		if (ret < 0)
			goto _tpm_clean_answer;

		intmask = TPM_GLOBAL_INT_ENABLE;
		ret = I2C_WRITE_DATA(tpm_dev, (TPM_INT_ENABLE + 3),
				     &intmask, 1);
		if (ret < 0)
			goto _tpm_clean_answer;

		chip->vendor.irq = client->irq;

		disable_irq_nosync(chip->vendor.irq);

		tpm_gen_interrupt(chip);
	}

	tpm_get_timeouts(chip);
	tpm_do_selftest(chip);

	return tpm_chip_register(chip);
_tpm_clean_answer:
	dev_info(chip->pdev, "TPM I2C initialisation failed\n");
	return ret;
}
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			     const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}
static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
				 struct resource *io_res, u64 start, u32 size)
{
	struct resource new_res = {
		.start	= start,
		.end	= start + size - 1,
		.flags	= IORESOURCE_MEM,
	};

	/* Detect a 64 bit address on a 32 bit system */
	if (start != new_res.start)
		return (void __iomem *) ERR_PTR(-EINVAL);

	if (!resource_contains(io_res, &new_res))
		return devm_ioremap_resource(dev, &new_res);

	return priv->iobase + (new_res.start - io_res->start);
}

/*
 * Work around broken BIOSs that return inconsistent values from the ACPI
 * region vs the registers. Trust the ACPI region. Such broken systems
 * probably cannot send large TPM commands since the buffer will be truncated.
 */
static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
			      u64 start, u64 size)
{
	if (io_res->start > start || io_res->end < start)
		return size;

	if (start + size - 1 <= io_res->end)
		return size;

	dev_err(dev,
		FW_BUG "ACPI region does not cover the entire command/response buffer. %pr vs %llx %llx\n",
		io_res, start, size);

	return io_res->end - start + 1;
}

static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
		      struct acpi_table_tpm2 *buf)
{
	struct list_head resources;
	struct resource io_res;
	struct device *dev = &device->dev;
	u32 pa_high, pa_low;
	u64 cmd_pa;
	u32 cmd_size;
	__le64 __rsp_pa;
	u64 rsp_pa;
	u32 rsp_size;
	int ret;

	INIT_LIST_HEAD(&resources);
	ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
				     &io_res);
	if (ret < 0)
		return ret;
	acpi_dev_free_resource_list(&resources);

	if (resource_type(&io_res) != IORESOURCE_MEM) {
		dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
		return -EINVAL;
	}

	priv->iobase = devm_ioremap_resource(dev, &io_res);
	if (IS_ERR(priv->iobase))
		return PTR_ERR(priv->iobase);

	/* The ACPI IO region starts at the head area and continues to include
	 * the control area, as one nice sane region except for some older
	 * stuff that puts the control area outside the ACPI IO region.
	 */
	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
		if (buf->control_address == io_res.start +
		    sizeof(*priv->regs_h))
			priv->regs_h = priv->iobase;
		else
			dev_warn(dev, FW_BUG "Bad ACPI memory layout");
	}

	ret = __crb_request_locality(dev, priv, 0);
	if (ret)
		return ret;

	priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
				   sizeof(struct crb_regs_tail));
	if (IS_ERR(priv->regs_t))
		return PTR_ERR(priv->regs_t);

	/*
	 * PTT HW bug w/a: wake up the device to access
	 * possibly not retained registers.
	 */
	ret = crb_cmd_ready(dev, priv);
	if (ret)
		return ret;

	pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
	pa_low  = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
	cmd_pa = ((u64)pa_high << 32) | pa_low;
	cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
				      ioread32(&priv->regs_t->ctrl_cmd_size));

	dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
		pa_high, pa_low, cmd_size);

	priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
	if (IS_ERR(priv->cmd)) {
		ret = PTR_ERR(priv->cmd);
		goto out;
	}

	memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
	rsp_pa = le64_to_cpu(__rsp_pa);
	rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
				      ioread32(&priv->regs_t->ctrl_rsp_size));

	if (cmd_pa != rsp_pa) {
		priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
		ret = PTR_ERR_OR_ZERO(priv->rsp);
		goto out;
	}

	/* According to the PTP specification, overlapping command and response
	 * buffer sizes must be identical.
	 */
	if (cmd_size != rsp_size) {
		dev_err(dev, FW_BUG
			"overlapping command and response buffer sizes are not identical");
		ret = -EINVAL;
		goto out;
	}

	priv->rsp = priv->cmd;

out:
	if (!ret)
		priv->cmd_size = cmd_size;

	crb_go_idle(dev, priv);

	__crb_relinquish_locality(dev, priv, 0);

	return ret;
}

static int crb_acpi_add(struct acpi_device *device)
{
	struct acpi_table_tpm2 *buf;
	struct crb_priv *priv;
	struct tpm_chip *chip;
	struct device *dev = &device->dev;
	struct tpm2_crb_smc *crb_smc;
	acpi_status status;
	u32 sm;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
		return -EINVAL;
	}

	/* Should the FIFO driver handle this? */
	sm = buf->start_method;
	if (sm == ACPI_TPM2_MEMORY_MAPPED)
		return -ENODEV;

	priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
			dev_err(dev,
				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
				buf->header.length,
				ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
			return -EINVAL;
		}
		crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
		priv->smc_func_id = crb_smc->smc_func_id;
	}

	priv->sm = sm;
	priv->hid = acpi_device_hid(device);

	rc = crb_map_io(device, priv, buf);
	if (rc)
		return rc;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	dev_set_drvdata(&chip->dev, priv);
	chip->acpi_dev_handle = device->handle;
	chip->flags = TPM_CHIP_FLAG_TPM2;

	rc = __crb_request_locality(dev, priv, 0);
	if (rc)
		return rc;

	rc = crb_cmd_ready(dev, priv);
	if (rc)
		goto out;

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	rc = tpm_chip_register(chip);
	if (rc) {
		crb_go_idle(dev, priv);
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		goto out;
	}

	pm_runtime_put_sync(dev);

out:
	__crb_relinquish_locality(dev, priv, 0);

	return rc;
}

static int crb_acpi_remove(struct acpi_device *device)
{
	struct device *dev = &device->dev;
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_chip_unregister(chip);

	pm_runtime_disable(dev);

	return 0;
}

static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return crb_go_idle(dev, priv);
}

static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return crb_cmd_ready(dev, priv);
}

static int __maybe_unused crb_pm_suspend(struct device *dev)
{
	int ret;

	ret = tpm_pm_suspend(dev);
	if (ret)
		return ret;

	return crb_pm_runtime_suspend(dev);
}
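/*
 * Sketch of how the suspend/resume callbacks above would typically be tied
 * together in a struct dev_pm_ops using the standard kernel PM macros. The
 * system-resume counterpart and the struct names carry the _sketch suffix
 * because they are illustrative assumptions, not copied from the original
 * source.
 */
static int __maybe_unused crb_pm_resume_sketch(struct device *dev)
{
	/* hypothetical: wake the device back up on system resume */
	return crb_pm_runtime_resume(dev);
}

static const struct dev_pm_ops crb_pm_sketch = {
	SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume_sketch)
	SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
};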