/**
 * mei_txe_dma_setup - reserve the DMA pool and set up the SATT2 window
 *
 * @dev: the device structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_txe_dma_setup(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	int err;

	/* prefer the ACPI-reserved pool; fall back to a driver allocation */
	err = mei_reserver_dma_acpi(dev);
	if (err)
		err = mei_alloc_dma(dev);
	if (err)
		return err;

	err = mei_txe_setup_satt2(dev,
				  dma_to_phys(&dev->pdev->dev, hw->pool_paddr),
				  hw->pool_size);
	if (err) {
		if (hw->pool_release)
			hw->pool_release(hw);
		return err;
	}

	hw->mdev = mei_mm_init(&dev->pdev->dev,
			       hw->pool_vaddr, hw->pool_paddr, hw->pool_size);
	if (IS_ERR_OR_NULL(hw->mdev)) {
		/* PTR_ERR(NULL) is 0, so map NULL to a real error code */
		err = hw->mdev ? PTR_ERR(hw->mdev) : -ENOMEM;
		if (hw->pool_release)
			hw->pool_release(hw);
		return err;
	}

	return 0;
}
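/*
 * Hypothetical companion helper, not part of the original source: a minimal
 * sketch of the teardown path mirroring mei_txe_dma_setup(). The name
 * mei_txe_dma_unset and the mei_mm_deinit() call are assumptions; substitute
 * whatever teardown the mei_mm layer actually provides.
 */
void mei_txe_dma_unset(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	/* tear down in reverse order of mei_txe_dma_setup() */
	if (!IS_ERR_OR_NULL(hw->mdev))
		mei_mm_deinit(hw->mdev); /* assumed counterpart of mei_mm_init() */

	/* releasing the pool also drops the SATT2 window backed by it */
	if (hw->pool_release)
		hw->pool_release(hw);
}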
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	int err;
	int i;

	/* enable pci device */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}

	/* set PCI host mastering */
	pci_set_master(pdev);

	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto release_regions;
		}
	}

	/* allocate and initialize the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_txe_hw(dev);

	/* prefer the ACPI-reserved pool; fall back to a driver allocation */
	err = mei_reserver_dma_acpi(dev);
	if (err)
		err = mei_alloc_dma(dev);
	if (err)
		goto free_device;

	/* map IO device memory */
	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
		hw->mem_addr[i] = pci_iomap(pdev, i, 0);
		if (!hw->mem_addr[i]) {
			dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
			err = -ENOMEM;
			goto free_device;
		}
	}

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_txe_irq_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_txe_irq_quick_handler,
					   mei_txe_irq_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto free_device;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = mei_txe_setup_satt2(dev,
				  dma_to_phys(&dev->pdev->dev, hw->pool_paddr),
				  hw->pool_size);
	if (err)
		goto release_irq;

	err = mei_register(dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	hw->mdev = mei_mm_init(&dev->pdev->dev,
			       hw->pool_vaddr, hw->pool_paddr, hw->pool_size);
	if (IS_ERR_OR_NULL(hw->mdev)) {
		/* PTR_ERR(NULL) is 0, so map NULL to a real error code */
		err = hw->mdev ? PTR_ERR(hw->mdev) : -ENOMEM;
		goto deregister_mei;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	pm_runtime_mark_last_busy(&pdev->dev);

	/*
	 * For non-wakeable HW the runtime pm framework
	 * can't be used on the pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	if (!nopg)
		pm_runtime_allow(&pdev->dev);

	return 0;

deregister_mei:
	mei_deregister(dev);
release_irq:
	mei_cancel_work(dev);
	/* disable interrupts */
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
free_device:
	if (hw->pool_release)
		hw->pool_release(hw);
	mei_txe_pci_iounmap(pdev, hw);
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}