/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;	/* probe never completed; nothing to tear down */

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	/* stop the device state machine before releasing any resources */
	mei_stop(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	/* remove the character-device / class registration before freeing dev */
	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * mei_txe_pm_runtime_suspend - runtime-PM suspend callback for TXE devices
 *
 * @device: device structure (embedded in the PCI device)
 *
 * Return: 0 on success, -ENODEV if no driver data is attached,
 *         -EAGAIN if the write path is still busy, or the error
 *         from mei_txe_aliveness_set_sync().
 */
static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	/* only drop aliveness when there is no write in flight;
	 * otherwise ask the PM core to retry later */
	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/*
	 * If everything is okay we're about to enter PCI low
	 * power state (D3) therefore we need to disable the
	 * interrupts towards host.
	 * However if device is not wakeable we do not enter
	 * D-low state and we need to keep the interrupt kicking
	 */
	if (!ret && pci_dev_run_wake(pdev))
		mei_disable_interrupts(dev);

	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);
	return ret;
}
/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	/* this driver supports a single device; ignore anything else */
	if (mei_device != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	mei_wd_stop(dev, false);

	mei_device = NULL;

	/* disconnect the fixed AMTHIF and watchdog clients, if connected */
	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->num_mei_me_clients = 0;

	mutex_unlock(&dev->device_lock);

	/* wait for any queued work to finish before tearing down the IRQ */
	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * mei_txe_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_shutdown is called from the reboot notifier
 * it's a simplified version of remove so we go down
 * faster.
 */
static void mei_txe_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	/* the PM domain was only installed for non-wakeable devices at probe
	 * time, so only unset it in that case */
	if (!pci_dev_run_wake(pdev))
		mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}
/**
 * mei_me_pci_suspend - system suspend callback for ME devices
 *
 * @device: device structure (embedded in the PCI device)
 *
 * Stops the device and releases the interrupt so it can be
 * re-requested on resume.
 *
 * Return: 0 on success, -ENODEV if no driver data is attached.
 */
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	/* release the IRQ and MSI; resume re-acquires them */
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}
/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "mei: dev =NULL\n");
		return;
	}

	/* take a runtime-PM reference without resuming, so the device is not
	 * runtime-suspended underneath the teardown */
	pm_runtime_get_noresume(&pdev->dev);

	hw = to_txe_hw(dev);

	mei_stop(dev);

	/* PM domain was installed only for non-wakeable devices */
	if (!pci_dev_run_wake(pdev))
		mei_txe_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	/* release the memory pool machinery before unmapping BARs */
	mei_mm_deinit(hw->mdev);

	if (hw->pool_release)
		hw->pool_release(hw);

	pci_set_drvdata(pdev, NULL);

	mei_txe_pci_iounmap(pdev, hw);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	/* probe dropped a runtime-PM reference when power gating is enabled;
	 * take it back so the device stays resumed during teardown */
	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	/* PM domain was installed only for non-wakeable devices */
	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "mei: dev == NULL\n");
		return;
	}

	/* keep the device from being runtime-suspended during teardown */
	pm_runtime_get_noresume(&pdev->dev);

	mei_stop(dev);

	/* PM domain was installed only for non-wakeable devices */
	if (!pci_dev_run_wake(pdev))
		mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

	/* devres (pcim_*) handles region/BAR/device cleanup; only the
	 * character-device registration is released explicitly here */
	mei_deregister(dev);
}
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	/* bail out early on platforms where the ME interface must not be
	 * driven (per-cfg quirk check) */
	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	/* prefer a 64-bit DMA mask, fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For not wake-able HW runtime pm framework
	 * can't be used on pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	/* drop the initial PM reference only if power gating is supported */
	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");

	return err;
}
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	int err;
	int i;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	/* prefer a 36-bit DMA mask, fall back to 32-bit */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto release_regions;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_txe_hw(dev);

	/* mapping IO device memory: TXE exposes multiple BARs */
	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
		hw->mem_addr[i] = pci_iomap(pdev, i, 0);
		if (!hw->mem_addr[i]) {
			dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
			err = -ENOMEM;
			goto free_device;
		}
	}

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto free_device;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * For not wake-able HW runtime pm framework
	 * can't be used on pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:

	mei_cancel_work(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

free_device:
	mei_txe_pci_iounmap(pdev, hw);

	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");

	return err;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	/* bail out early on platforms where the ME interface must not be
	 * driven (quirk check against the id-table entry) */
	if (!mei_me_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	/* prefer a 64-bit DMA mask, fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = mei_register(dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");

	return err;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);
	/* this driver supports a single device instance only */
	if (mei_device) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "mei: Failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, mei_driver_name);
	if (err) {
		printk(KERN_ERR "mei: Failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = init_mei_device(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, mei_driver_name, dev);
	if (err) {
		printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto unmap_memory;
	}
	INIT_DELAYED_WORK(&dev->wd_work, mei_wd_timer);
	if (mei_hw_init(dev)) {
		printk(KERN_ERR "mei: Init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}
	mei_device = pdev;
	pci_set_drvdata(pdev, dev);
	/* kick off the periodic watchdog timer work */
	schedule_delayed_work(&dev->wd_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("mei: Driver initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
unmap_memory:
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	printk(KERN_ERR "mei: Driver initialization failed.\n");
	return err;
}