/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct mei_device *dev;
        struct mei_txe_hw *hw;
        int err;
        int i;

        /* enable pci dev */
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions for mei driver */
        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto disable_device;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No suitable DMA available.\n");
                        goto release_regions;
                }
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_txe_dev_init(pdev);
        if (!dev) {
                err = -ENOMEM;
                goto release_regions;
        }
        hw = to_txe_hw(dev);

        /* mapping IO device memory */
        for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
                hw->mem_addr[i] = pci_iomap(pdev, i, 0);
                if (!hw->mem_addr[i]) {
                        dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
                        err = -ENOMEM;
                        goto free_device;
                }
        }

        pci_enable_msi(pdev);

        /* clear spurious interrupts */
        mei_clear_interrupts(dev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_txe_irq_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_txe_irq_quick_handler,
                        mei_txe_irq_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto free_device;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto stop;

        pci_set_drvdata(pdev, dev);

        /*
         * For hardware that is not wake capable, the runtime PM
         * framework cannot be used at the PCI device level.
         * Use domain runtime PM callbacks instead.
         */
        if (!pci_dev_run_wake(pdev))
                mei_txe_set_pm_domain(dev);

        pm_runtime_put_noidle(&pdev->dev);

        return 0;

stop:
        mei_stop(dev);
release_irq:
        mei_cancel_work(dev);
        /* disable interrupts */
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);
free_device:
        mei_txe_pci_iounmap(pdev, hw);
        kfree(dev);
release_regions:
        pci_release_regions(pdev);
disable_device:
        pci_disable_device(pdev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}
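/*
 * The free_device unwind path above calls mei_txe_pci_iounmap() to drop
 * whichever BAR mappings were established before the failure. A minimal
 * sketch of such a helper, assuming hw->mem_addr[] holds the cookies
 * returned by pci_iomap() and is NULL for BARs that were never mapped:
 */
static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
        int i;

        for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
                /* probe can fail mid-loop, so skip unmapped BARs */
                if (hw->mem_addr[i]) {
                        pci_iounmap(pdev, hw->mem_addr[i]);
                        hw->mem_addr[i] = NULL;
                }
        }
}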
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
        struct mei_device *dev;
        struct mei_me_hw *hw;
        int err;

        if (!mei_me_quirk_probe(pdev, cfg))
                return -ENODEV;

        /* enable pci dev */
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions for mei driver */
        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto disable_device;
        }

        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
            dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
        }
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                goto release_regions;
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_me_dev_init(pdev, cfg);
        if (!dev) {
                err = -ENOMEM;
                goto release_regions;
        }
        hw = to_me_hw(dev);

        /* mapping IO device memory */
        hw->mem_addr = pci_iomap(pdev, 0, 0);
        if (!hw->mem_addr) {
                dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
                err = -ENOMEM;
                goto free_device;
        }

        pci_enable_msi(pdev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_me_irq_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_me_irq_quick_handler,
                        mei_me_irq_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto disable_msi;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto release_irq;

        pci_set_drvdata(pdev, dev);

        schedule_delayed_work(&dev->timer_work, HZ);

        /*
         * For hardware that is not wake capable, the runtime PM
         * framework cannot be used at the PCI device level.
         * Use domain runtime PM callbacks instead.
         */
        if (!pci_dev_run_wake(pdev))
                mei_me_set_pm_domain(dev);

        if (mei_pg_is_enabled(dev))
                pm_runtime_put_noidle(&pdev->dev);

        dev_dbg(&pdev->dev, "initialization successful.\n");

        return 0;

release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
disable_msi:
        pci_disable_msi(pdev);
        pci_iounmap(pdev, hw->mem_addr);
free_device:
        kfree(dev);
release_regions:
        pci_release_regions(pdev);
disable_device:
        pci_disable_device(pdev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}
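/*
 * mei_me_probe() is never called directly; the PCI core invokes it for
 * each matching device once the driver is registered. A sketch of the
 * registration glue, assuming an id table mei_me_pci_tbl and a matching
 * mei_me_remove() teardown callback exist alongside the probe routine:
 */
static struct pci_driver mei_me_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_me_pci_tbl,
        .probe = mei_me_probe,
        .remove = mei_me_remove,
};

module_pci_driver(mei_me_driver);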
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct mei_device *dev;
        struct mei_me_hw *hw;
        int err;

        if (!mei_me_quirk_probe(pdev, ent)) {
                err = -ENODEV;
                goto end;
        }

        /* enable pci dev */
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions for mei driver */
        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto disable_device;
        }

        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
            dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
        }
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                goto release_regions;
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_me_dev_init(pdev);
        if (!dev) {
                err = -ENOMEM;
                goto release_regions;
        }
        hw = to_me_hw(dev);

        /* mapping IO device memory */
        hw->mem_addr = pci_iomap(pdev, 0, 0);
        if (!hw->mem_addr) {
                dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
                err = -ENOMEM;
                goto free_device;
        }

        pci_enable_msi(pdev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_me_irq_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_me_irq_quick_handler,
                        mei_me_irq_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto disable_msi;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        err = mei_register(dev);
        if (err)
                goto release_irq;

        pci_set_drvdata(pdev, dev);

        schedule_delayed_work(&dev->timer_work, HZ);

        dev_dbg(&pdev->dev, "initialization successful.\n");

        return 0;

release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
disable_msi:
        pci_disable_msi(pdev);
        pci_iounmap(pdev, hw->mem_addr);
free_device:
        kfree(dev);
release_regions:
        pci_release_regions(pdev);
disable_device:
        pci_disable_device(pdev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}
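/*
 * On unbind, the remove callback must undo the probe steps in reverse
 * order, mirroring the error-unwind labels above. A hedged sketch of
 * such a routine; the name, mei_deregister(), and the exact teardown
 * order are assumptions for illustration, not the upstream code:
 */
static void mei_me_remove(struct pci_dev *pdev)
{
        struct mei_device *dev = pci_get_drvdata(pdev);
        struct mei_me_hw *hw = to_me_hw(dev);

        mei_stop(dev);                  /* quiesce the device */
        mei_cancel_work(dev);           /* flush pending work items */
        mei_deregister(dev);            /* drop the character device */
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);
        pci_iounmap(pdev, hw->mem_addr);
        kfree(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}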