/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/*
 * power management
 */
static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	printk(KERN_INFO "Ready to suspend\n");

	if (!dev) {
		printk(KERN_ERR "Invalid memory\n");
		return 0;
	}

	mutex_lock(&dev->dev_mutex);

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S3);

	if (dev->irq >= 0) {
		synchronize_irq(dev->irq);
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_disable_msi(pci);

	pci_save_state(pci);
	pci_enable_wake(pci, pci_choose_state(pci, state), 1);
	pci_disable_device(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
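/*
 * For context only: a minimal sketch of the matching resume path, not
 * taken from the rtsx driver. rtsx_interrupt and CR_DRIVER_NAME are
 * assumed names here. It shows the conventional ordering that mirrors
 * the suspend above: restore power and PCI state, re-enable the device,
 * then re-enable MSI *before* request_irq() so pci->irq already holds
 * the MSI vector.
 */
static int rtsx_resume_sketch(struct pci_dev *pci)
{
	struct rtsx_dev *dev = pci_get_drvdata(pci);
	int ret;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);

	ret = pci_enable_device(pci);
	if (ret)
		return ret;
	pci_set_master(pci);

	if (dev->chip->msi_en && pci_enable_msi(pci))
		dev->chip->msi_en = 0;	/* fall back to INTx */

	ret = request_irq(pci->irq, rtsx_interrupt,
			  dev->chip->msi_en ? 0 : IRQF_SHARED,
			  CR_DRIVER_NAME, dev);
	if (ret)
		return ret;
	dev->irq = pci->irq;

	return 0;
}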
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		virqfd_disable(vdev, &vdev->ctx[i].unmask);
		virqfd_disable(vdev, &vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	mei_pdev = NULL;

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src)
			continue;

		kfree(src->enabled_types);
		src->enabled_types = NULL;
	}
}
static void i2c_vr_remove(struct pci_dev *dev)
{
	struct i2c_vr_regs __iomem *regs;

	/* disable device */
	regs = i2c_vr[0].regs;
	writel(0, &regs->icr);
	regs = i2c_vr[1].regs;
	writel(0, &regs->icr);

	pci_set_drvdata(dev, NULL);
	i2c_del_adapter(&i2c_vr[1].adap);
	i2c_del_adapter(&i2c_vr[0].adap);
	free_irq(dev->irq, i2c_vr);
	pci_disable_msi(dev);
	i2c_vr_unmapregs(dev, 1);
	i2c_vr_unmapregs(dev, 0);
	pci_release_regions(dev);
	pci_disable_device(dev);
}
static int chd_dec_enable_int(struct crystalhd_adp *adp)
{
	int rc = 0;

	if (!adp || !adp->pdev) {
		BCMLOG_ERR("Invalid arg!!\n");
		return -EINVAL;
	}

	if (adp->pdev->msi_enabled)
		adp->msi = 1;
	else
		adp->msi = pci_enable_msi(adp->pdev);

	rc = request_irq(adp->pdev->irq, chd_dec_isr, IRQF_SHARED,
			 adp->name, (void *)adp);
	if (rc) {
		BCMLOG_ERR("Interrupt request failed..\n");
		pci_disable_msi(adp->pdev);
	}

	return rc;
}
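/*
 * Aside, not part of the driver above: pci_enable_msi() returns 0 on
 * success, so the assignment "adp->msi = pci_enable_msi(...)" leaves
 * adp->msi == 0 precisely when MSI *was* just enabled. A minimal sketch
 * of the more conventional enable/fallback pattern follows; the
 * example_* names and the "example" IRQ label are illustrative, not
 * from crystalhd.
 */
static int example_enable_irq(struct pci_dev *pdev, irq_handler_t handler,
			      void *ctx, bool *msi_on)
{
	int rc;

	*msi_on = !pci_enable_msi(pdev);	/* true only if MSI came up */

	rc = request_irq(pdev->irq, handler,
			 *msi_on ? 0 : IRQF_SHARED, "example", ctx);
	if (rc && *msi_on) {
		/* unwind in reverse order on failure */
		pci_disable_msi(pdev);
		*msi_on = false;
	}
	return rc;
}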
static int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
				 struct pci_dev *dev, struct xen_pci_op *op)
{
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
		       pci_name(dev));

	if (dev->msi_enabled) {
		struct xen_pcibk_dev_data *dev_data;

		pci_disable_msi(dev);

		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			dev_data->ack_intr = 1;
	}
	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
		       op->value);

	return 0;
}
static void __devexit atomisp_pci_remove(struct pci_dev *dev)
{
	struct atomisp_device *isp = (struct atomisp_device *)
		pci_get_drvdata(dev);

	pm_runtime_forbid(&dev->dev);
	pm_runtime_get_noresume(&dev->dev);
	pm_qos_remove_request(&isp->pm_qos);

	atomisp_msi_irq_uninit(isp, dev);
	free_irq(dev->irq, isp);
	pci_disable_msi(dev);
	pci_dev_put(isp->hw_contex.pci_root);

	atomisp_unregister_entities(isp);

	flush_workqueue(isp->work_queue);
	destroy_workqueue(isp->work_queue);

	iounmap(atomisp_io_base);
	pci_set_drvdata(dev, NULL);
	pci_release_region(dev, 0);
	pci_disable_device(dev);	/* in case user forgets to close */

	/*
	 * FIXME:
	 * MRFLD VP does not use firmware loading
	 * from file system
	 */
	if (!IS_MRFLD)
		release_firmware(isp->firmware);

	hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
	kfree(isp);
}
/* Release all our dynamic resources */
static void rtsx_release_resources(struct rtsx_dev *dev)
{
	dev_info(&dev->pci->dev, "-- %s\n", __func__);

	/* Tell the control thread to exit. The SCSI host must
	 * already have been removed so it won't try to queue
	 * any more commands.
	 */
	dev_info(&dev->pci->dev, "-- sending exit command to thread\n");
	complete(&dev->cmnd_ready);
	if (dev->ctl_thread)
		wait_for_completion(&dev->control_exit);
	if (dev->polling_thread)
		wait_for_completion(&dev->polling_exit);

	wait_timeout(200);

	if (dev->rtsx_resv_buf) {
		dma_free_coherent(&dev->pci->dev, RTSX_RESV_BUF_LEN,
				  dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr);
		dev->chip->host_cmds_ptr = NULL;
		dev->chip->host_sg_tbl_ptr = NULL;
	}

	if (dev->irq > 0)
		free_irq(dev->irq, (void *)dev);
	if (dev->chip->msi_en)
		pci_disable_msi(dev->pci);
	if (dev->remap_addr)
		iounmap(dev->remap_addr);

	pci_disable_device(dev->pci);
	pci_release_regions(dev->pci);

	rtsx_release_chip(dev->chip);
	kfree(dev->chip);
}
/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "mei: dev = NULL\n");
		return;
	}

	pm_runtime_get_noresume(&pdev->dev);

	hw = to_txe_hw(dev);

	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_txe_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	pci_set_drvdata(pdev, NULL);

	mei_txe_pci_iounmap(pdev, hw);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void bcwc_pci_remove(struct pci_dev *pdev)
{
	struct bcwc_private *dev_priv;

	dev_priv = pci_get_drvdata(pdev);
	if (dev_priv) {
		bcwc_irq_disable(dev_priv);
		pci_disable_msi(pdev);

		if (dev_priv->s2_io)
			iounmap(dev_priv->s2_io);
		if (dev_priv->s2_mem)
			iounmap(dev_priv->s2_mem);
		if (dev_priv->isp_io)
			iounmap(dev_priv->isp_io);

		pci_release_region(pdev, BCWC_PCI_S2_IO);
		pci_release_region(pdev, BCWC_PCI_S2_MEM);
		pci_release_region(pdev, BCWC_PCI_ISP_IO);
	}
	pci_disable_device(pdev);
}
static void __devexit ddb_remove(struct pci_dev *pdev)
{
	struct ddb *dev = (struct ddb *)pci_get_drvdata(pdev);

	ddb_ports_detach(dev);
	ddb_i2c_release(dev);

	ddbwritel(dev, 0, INTERRUPT_ENABLE);
	ddbwritel(dev, 0, MSI1_ENABLE);
	if (dev->msi == 2)
		free_irq(dev->pdev->irq + 1, dev);
	free_irq(dev->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
	if (dev->msi)
		pci_disable_msi(dev->pdev);
#endif
	ddb_ports_release(dev);
	ddb_buffers_free(dev);
	ddb_device_destroy(dev);
	ddb_unmap(dev);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
}
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int i;
	int err;
	int irq = 0;
	int id;
	char name[20];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->alignment = data->alignment;
		no_msi = data->no_msi;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!no_msi) {
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "failed to get MSI interrupts\n");
	}

	err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
			       IRQF_SHARED, DRV_MODULE_NAME, test);
	if (err) {
		dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
		goto err_disable_msi;
	}

	for (i = 1; i < irq; i++) {
		err = devm_request_irq(dev, pdev->irq + i,
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, DRV_MODULE_NAME, test);
		if (err)
			dev_err(dev, "failed to request IRQ %d for MSI %d\n",
				pdev->irq + i, i + 1);
	}

	for (bar = BAR_0; bar <= BAR_5; bar++) {
		base = pci_ioremap_bar(pdev, bar);
		if (!base) {
			dev_err(dev, "failed to read BAR%d\n", bar);
			WARN_ON(bar == test_reg_bar);
		}
		test->bar[bar] = base;
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = name;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "failed to register device\n");
		goto err_ida_remove;
	}

	return 0;

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_msi:
	pci_disable_msi(pdev);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}
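/*
 * Aside, not part of the driver above: the probe allocates vectors with
 * pci_alloc_irq_vectors() but unwinds with pci_disable_msi(). Each API
 * pairs with its own teardown call; a minimal sketch of the matched
 * modern pairing follows. The example_* names and handler are
 * illustrative assumptions.
 */
static int example_setup_vectors(struct pci_dev *pdev)
{
	int nvec, i, err;

	nvec = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps vector index -> Linux IRQ number */
		err = request_irq(pci_irq_vector(pdev, i), example_handler,
				  0, "example", pdev);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	while (i--)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);	/* matched teardown */
	return err;
}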
/*
 * adf_disable_msi
 * Function disables MSI capability
 */
STATIC void adf_disable_msi(icp_accel_pci_info_t *pci_dev_info)
{
	ADF_DEBUG("Disabling MSI capability\n");
	pci_disable_msi(pci_dev_info->pDev);
	pci_dev_info->irq = pci_dev_info->pDev->irq;
}
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For not wake-able HW runtime pm framework
	 * can't be used on pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
static int __devinit ddb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct ddb *dev;
	int stat = 0;
	int irq_flag = IRQF_SHARED;

	if (pci_enable_device(pdev) < 0)
		return -ENODEV;

	dev = vzalloc(sizeof(struct ddb));
	if (dev == NULL)
		return -ENOMEM;

	dev->has_dma = 1;
	dev->pdev = pdev;
	dev->dev = &pdev->dev;
	pci_set_drvdata(pdev, dev);

	dev->ids.vendor = id->vendor;
	dev->ids.device = id->device;
	dev->ids.subvendor = id->subvendor;
	dev->ids.subdevice = id->subdevice;

	dev->info = (struct ddb_info *)id->driver_data;
	pr_info("DDBridge driver detected: %s\n", dev->info->name);

	dev->regs_len = pci_resource_len(dev->pdev, 0);
	dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
			    pci_resource_len(dev->pdev, 0));
	if (!dev->regs) {
		pr_err("DDBridge: not enough memory for register map\n");
		stat = -ENOMEM;
		goto fail;
	}
	if (ddbreadl(dev, 0) == 0xffffffff) {
		pr_err("DDBridge: cannot read registers\n");
		stat = -ENODEV;
		goto fail;
	}

	dev->ids.hwid = ddbreadl(dev, 0);
	dev->ids.regmapid = ddbreadl(dev, 4);

	pr_info("HW %08x REGMAP %08x\n", dev->ids.hwid, dev->ids.regmapid);

	ddbwritel(dev, 0x00000000, INTERRUPT_ENABLE);
	ddbwritel(dev, 0x00000000, MSI1_ENABLE);
	ddbwritel(dev, 0x00000000, MSI2_ENABLE);
	ddbwritel(dev, 0x00000000, MSI3_ENABLE);
	ddbwritel(dev, 0x00000000, MSI4_ENABLE);
	ddbwritel(dev, 0x00000000, MSI5_ENABLE);
	ddbwritel(dev, 0x00000000, MSI6_ENABLE);
	ddbwritel(dev, 0x00000000, MSI7_ENABLE);

#ifdef CONFIG_PCI_MSI
	if (msi && pci_msi_enabled()) {
		stat = pci_enable_msi_block(dev->pdev, 2);
		if (stat == 0) {
			dev->msi = 1;
			pr_info("DDBridge using 2 MSI interrupts\n");
		}
		if (stat == 1)
			stat = pci_enable_msi(dev->pdev);
		if (stat < 0) {
			pr_info(": MSI not available.\n");
		} else {
			irq_flag = 0;
			dev->msi++;
		}
	}
	if (dev->msi == 2) {
		stat = request_irq(dev->pdev->irq, irq_handler0,
				   irq_flag, "ddbridge", (void *)dev);
		if (stat < 0)
			goto fail0;
		stat = request_irq(dev->pdev->irq + 1, irq_handler1,
				   irq_flag, "ddbridge", (void *)dev);
		if (stat < 0) {
			free_irq(dev->pdev->irq, dev);
			goto fail0;
		}
	} else
#endif
	{
#ifdef DDB_TEST_THREADED
		stat = request_threaded_irq(dev->pdev->irq, irq_handler,
					    irq_thread, irq_flag,
					    "ddbridge", (void *)dev);
#else
		stat = request_irq(dev->pdev->irq, irq_handler,
				   irq_flag, "ddbridge", (void *)dev);
#endif
		if (stat < 0)
			goto fail0;
	}
	ddbwritel(dev, 0, DMA_BASE_READ);
	if (dev->info->type != DDB_MOD)
		ddbwritel(dev, 0, DMA_BASE_WRITE);
	/*ddbwritel(dev, 0xffffffff, INTERRUPT_ACK);*/
	if (dev->msi == 2) {
		ddbwritel(dev, 0x0fffff00, INTERRUPT_ENABLE);
		ddbwritel(dev, 0x0000000f, MSI1_ENABLE);
	} else {
		ddbwritel(dev, 0x0fffff0f, INTERRUPT_ENABLE);
		ddbwritel(dev, 0x00000000, MSI1_ENABLE);
	}
	if (ddb_i2c_init(dev) < 0)
		goto fail1;
	ddb_ports_init(dev);
	if (ddb_buffers_alloc(dev) < 0) {
		pr_info(": Could not allocate buffer memory\n");
		goto fail2;
	}
	if (ddb_ports_attach(dev) < 0)
		goto fail3;

	/* ignore if this fails */
	ddb_device_create(dev);

	if (dev->info->fan_num) {
		ddbwritel(dev, 1, GPIO_DIRECTION);
		ddbwritel(dev, 1, GPIO_OUTPUT);
	}
	if (dev->info->type == DDB_MOD)
		ddbridge_mod_init(dev);

	return 0;

fail3:
	ddb_ports_detach(dev);
	pr_err("fail3\n");
	ddb_ports_release(dev);
fail2:
	pr_err("fail2\n");
	ddb_buffers_free(dev);
	ddb_i2c_release(dev);
fail1:
	pr_err("fail1\n");
	ddbwritel(dev, 0, INTERRUPT_ENABLE);
	ddbwritel(dev, 0, MSI1_ENABLE);
	free_irq(dev->pdev->irq, dev);
	if (dev->msi == 2)
		free_irq(dev->pdev->irq + 1, dev);
fail0:
	pr_err("fail0\n");
	if (dev->msi)
		pci_disable_msi(dev->pdev);
fail:
	pr_err("fail\n");
	ddb_unmap(dev);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return -1;
}
static int __devinit atomisp_pci_probe(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	struct atomisp_device *isp = NULL;
	unsigned int start, len;
	void __iomem *base = NULL;
	int err = 0;

	if (!dev) {
		v4l2_err(&atomisp_dev, "atomisp: error device ptr\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	err = pci_enable_device(dev);
	if (err) {
		v4l2_err(&atomisp_dev, "Failed to enable CI ISP device\n");
		return err;
	}

	start = pci_resource_start(dev, 0);
	len = pci_resource_len(dev, 0);

	err = pci_request_region(dev, 0, atomisp_pci_driver.name);
	if (err) {
		v4l2_err(&atomisp_dev,
			 "Failed to request region 0x%1x-0x%Lx\n",
			 start, (unsigned long long)pci_resource_end(dev, 0));
		goto request_region_fail;
	}

	base = ioremap_nocache(start, len);
	if (!base) {
		v4l2_err(&atomisp_dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto ioremap_fail;
	}

	isp = kzalloc(sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		v4l2_err(&atomisp_dev, "Failed to alloc CI ISP structure\n");
		err = -ENOMEM;
		goto kzalloc_fail;
	}
	isp->sw_contex.probed = false;
	isp->sw_contex.init = false;
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->hw_contex.pci_root = pci_get_bus_and_slot(0, 0);

	/* Load isp firmware from user space */
	/*
	 * FIXME:
	 * MRFLD VP does not use firmware loading
	 * from file system
	 */
	if (!IS_MRFLD) {
		isp->firmware = load_firmware(&dev->dev);
		if (!isp->firmware) {
			v4l2_err(&atomisp_dev, "Load firmware failed\n");
			err = -ENOENT;
			goto load_fw_fail;
		}
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_initialize_modules\n");
		goto init_mod_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_register_entities failed\n");
		goto init_mod_fail;
	}

	init_completion(&isp->wq_frame_complete);
	init_completion(&isp->dis_state_complete);
	spin_lock_init(&isp->irq_lock);

	isp->work_queue = create_singlethread_workqueue(isp->v4l2_dev.name);
	if (isp->work_queue == NULL) {
		v4l2_err(&atomisp_dev, "Failed to initialize work queue\n");
		err = -ENOMEM;
		goto work_queue_fail;
	}
	INIT_WORK(&isp->work, atomisp_work);

	isp->hw_contex.ispmmadr = start;

	pci_set_master(dev);
	atomisp_io_base = base;

	isp->tvnorm = tvnorms;
	mutex_init(&isp->input_lock);
	/* isp_lock is to protect race access of css functions */
	mutex_init(&isp->isp_lock);
	isp->sw_contex.updating_uptr = false;
	isp->isp3a_stat_ready = false;

	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		v4l2_err(&atomisp_dev, "Failed to enable msi\n");
		goto enable_msi_fail;
	}
	err = request_irq(dev->irq, atomisp_isr,
			  IRQF_SHARED, "isp_irq", isp);
	if (err) {
		v4l2_err(&atomisp_dev, "Failed to request irq\n");
		goto request_irq_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt_wakeup_dog, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	/*
	 * FIXME:
	 * MRFLD VP does not implement
	 * PM Core
	 */
#ifdef CONFIG_PM
	if (!IS_MRFLD) {
		pm_runtime_put_noidle(&dev->dev);
		pm_runtime_allow(&dev->dev);
	}
#endif
	isp->sw_contex.probed = true;

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		v4l2_err(&atomisp_dev,
			 "Failed to register reserved memory pool.\n");

	return 0;

request_irq_fail:
	pci_disable_msi(dev);
enable_msi_fail:
	pci_set_drvdata(dev, NULL);
	destroy_workqueue(isp->work_queue);
work_queue_fail:
	atomisp_unregister_entities(isp);
init_mod_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	kfree(isp);
kzalloc_fail:
	iounmap(base);
ioremap_fail:
	pci_release_region(dev, 0);
request_region_fail:
	pci_disable_device(dev);
	return err;
}
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;
	node = dev_to_node(&pdev->dev);

	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/* NOTE: Disable MSIX if msix count is less than 16 because of
	 * hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}
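/*
 * Aside, not from the AMD NTB driver: on kernels that provide
 * pci_alloc_irq_vectors(), the MSI-X -> MSI -> INTx fallback ladder
 * above can usually be collapsed into one call; the flags name the
 * interrupt types to try, in that order. Note min_vecs applies to
 * whichever type succeeds, so a driver that can also run on a single
 * MSI or INTx vector passes 1 here and checks the returned count.
 * example_init_isr is an illustrative name.
 */
static int example_init_isr(struct pci_dev *pdev, int msix_max)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, msix_max,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI |
				     PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* request_irq() each vector via pci_irq_vector(pdev, i);
	 * undo with pci_free_irq_vectors(pdev).
	 */
	return nvec;
}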
static int bcwc_pci_probe(struct pci_dev *pdev,
			  const struct pci_device_id *entry)
{
	struct bcwc_private *dev_priv;
	int ret;

	dev_info(&pdev->dev, "Found Broadcom PCIe webcam with device id: %x\n",
		 pdev->device);

	dev_priv = kzalloc(sizeof(struct bcwc_private), GFP_KERNEL);
	if (!dev_priv) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		return -ENOMEM;
	}

	dev_priv->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable device\n");
		goto fail_free;
	}

	ret = bcwc_pci_reserve_mem(dev_priv);
	if (ret)
		goto fail_enable;

	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable MSI\n");
		goto fail_enable;
	}

	INIT_WORK(&dev_priv->irq_work, bcwc_irq_work);

	ret = bcwc_irq_enable(dev_priv);
	if (ret)
		goto fail_msi;

	ret = bcwc_pci_set_dma_mask(dev_priv, 64);
	if (ret)
		ret = bcwc_pci_set_dma_mask(dev_priv, 32);
	if (ret)
		goto fail_msi;

	dev_info(&pdev->dev, "Setting %ubit DMA mask\n", dev_priv->dma_mask);
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dev_priv->dma_mask));

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev_priv);

	dev_priv->ddr_model = 4;
	dev_priv->ddr_speed = 450;

	bcwc_hw_init(dev_priv);

	return 0;

fail_msi:
	pci_disable_msi(pdev);
fail_enable:
	pci_disable_device(pdev);
fail_free:
	kfree(dev_priv);
	return ret;
}
static int c_can_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
	struct c_can_priv *priv;
	struct net_device *dev;
	void __iomem *addr;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_device FAILED\n");
		goto out;
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev, "pci_request_regions FAILED\n");
		goto out_disable_device;
	}

	ret = pci_enable_msi(pdev);
	if (!ret) {
		dev_info(&pdev->dev, "MSI enabled\n");
		pci_set_master(pdev);
	}

	addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!addr) {
		dev_err(&pdev->dev,
			"device has no PCI memory resources, failing adapter\n");
		ret = -ENOMEM;
		goto out_release_regions;
	}

	/* allocate the c_can device */
	dev = alloc_c_can_dev();
	if (!dev) {
		ret = -ENOMEM;
		goto out_iounmap;
	}

	priv = netdev_priv(dev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;
	priv->base = addr;

	if (!c_can_pci_data->freq) {
		dev_err(&pdev->dev, "no clock frequency defined\n");
		ret = -ENODEV;
		goto out_free_c_can;
	} else {
		priv->can.clock.freq = c_can_pci_data->freq;
	}

	/* Configure CAN type */
	switch (c_can_pci_data->type) {
	case BOSCH_C_CAN:
		priv->regs = reg_map_c_can;
		break;
	case BOSCH_D_CAN:
		priv->regs = reg_map_d_can;
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}

	priv->type = c_can_pci_data->type;

	/* Configure access to registers */
	switch (c_can_pci_data->reg_align) {
	case C_CAN_REG_ALIGN_32:
		priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
		break;
	case C_CAN_REG_ALIGN_16:
		priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}

	ret = register_c_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto out_free_c_can;
	}

	dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
		KBUILD_MODNAME, priv->regs, dev->irq);

	return 0;

out_free_c_can:
	free_c_can_dev(dev);
out_iounmap:
	pci_iounmap(pdev, addr);
out_release_regions:
	pci_disable_msi(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
/*
========================================================================
	Routine Description:
		Disable MSI function for PCI.

	Arguments:
		pDev	- PCI device

	Return Value:
		None

	Note:
========================================================================
*/
VOID RtmpOsPciMsiDisable(VOID *pDev)
{
#ifdef PCI_MSI_SUPPORT
	pci_disable_msi(pDev);
#endif
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_stolen(dev);
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->dp_wq);
	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
void qib_nomsi(struct qib_devdata *dd)
{
	dd->msi_lo = 0;
	pci_disable_msi(dd->pcidev);
}
/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 * @param num_ioqs number of I/O queue interrupt vectors requested
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev,
				"Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev,
				"Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption: PF MSI-X vectors start at pf_srn and run
		 * through trs, not from 0. If not, change this code.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}

		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev,
				"unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev,
			"OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;

		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}

		for (i = 0; i < num_ioq_vectors; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ,
					 "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ,
					 "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask */
					irq_set_affinity_hint(
						msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask);
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev,
				 "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev,
				"Request IRQ failed with code: %d\n", irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}
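/*
 * Aside, not from the liquidio driver: the unwind above (clear the
 * affinity hint, free each requested vector, then pci_disable_msix(),
 * then free the bookkeeping arrays) is the mirror image of the setup.
 * A minimal teardown sketch; example_teardown_msix and the cookies
 * array of per-vector dev_id pointers are illustrative assumptions.
 */
static void example_teardown_msix(struct pci_dev *pdev,
				  struct msix_entry *entries, int nvec,
				  void **cookies)
{
	int i;

	for (i = 0; i < nvec; i++) {
		irq_set_affinity_hint(entries[i].vector, NULL);
		free_irq(entries[i].vector, cookies[i]);
	}
	pci_disable_msix(pdev);	/* only after every vector is freed */
	kfree(entries);
}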
int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb(dev, MUnit.OIMR);
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	    !aac_rx_restart_adapter(dev, 0))
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		       (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL))
			;
	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 * Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
		       dev->name, instance);
		goto error_iounmap;
	}
	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
		       dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) &
		 KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		     (status & (KERNEL_PANIC | SELF_TEST_FAILED |
				MONITOR_PANIC))) ||
		    time_after(jiffies, start + HZ * startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
			       dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		    ((status & (KERNEL_PANIC | SELF_TEST_FAILED |
				MONITOR_PANIC)) ||
		     time_after(jiffies, start + HZ *
				((startup_timeout > 60)
				 ? (startup_timeout - 60)
				 : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev,
					aac_rx_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;

	/*
	 * First clear out all interrupts. Then enable the one's that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED | IRQF_DISABLED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
		       name, instance);
		goto error_iounmap;
	}
	aac_adapter_enable_int(dev);
	/*
	 * Tell the adapter that all is configured, and it can
	 * start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	struct iwl_bus *bus;
	struct iwl_pci_bus *pci_bus;
	u16 pci_cmd;
	int err;

	bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
	if (!bus) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "Couldn't allocate iwl_pci_bus");
		err = -ENOMEM;
		goto out_no_pci;
	}

	pci_bus = IWL_BUS_GET_PCI_BUS(bus);
	pci_bus->pci_dev = pdev;

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				PCIE_LINK_STATE_L1 |
				PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, bus->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	pci_bus->hw_base = pci_iomap(pdev, 0, 0);
	if (!pci_bus->hw_base) {
		dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long)pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", pci_bus->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed");
		goto out_iounmap;
	}

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	bus->dev = &pdev->dev;
	bus->irq = pdev->irq;
	bus->ops = &pci_ops;

	err = iwl_probe(bus, cfg);
	if (err)
		goto out_disable_msi;
	return 0;

out_disable_msi:
	pci_disable_msi(pdev);
out_iounmap:
	pci_iounmap(pdev, pci_bus->hw_base);
out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(bus);
	return err;
}
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	int err;
	int i;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto release_regions;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_txe_hw(dev);

	/* mapping IO device memory */
	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
		hw->mem_addr[i] = pci_iomap(pdev, i, 0);
		if (!hw->mem_addr[i]) {
			dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
			err = -ENOMEM;
			goto free_device;
		}
	}

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto free_device;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * For not wake-able HW runtime pm framework
	 * can't be used on pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	/* disable interrupts */
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
free_device:
	mei_txe_pci_iounmap(pdev, hw);
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
void bm_pci_stop(bm_pci_device_t *pci)
{
	if (pci->pdev->msi_enabled)
		pci_disable_msi(pci->pdev);
	pci_disable_device(pci->pdev);
}
static int kp2000_pcie_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	int err = 0;
	struct kp2000_device *pcard;
	int rv;
	unsigned long reg_bar_phys_addr;
	unsigned long reg_bar_phys_len;
	unsigned long dma_bar_phys_addr;
	unsigned long dma_bar_phys_len;
	u16 regval;

	dev_dbg(&pdev->dev, "kp2000_pcie_probe(pdev = [%p], id = [%p])\n",
		pdev, id);

	/*
	 * Step 1: Allocate a struct for the pcard
	 */
	pcard = kzalloc(sizeof(struct kp2000_device), GFP_KERNEL);
	if (NULL == pcard) {
		dev_err(&pdev->dev,
			"probe: failed to allocate private card data\n");
		return -ENOMEM;
	}
	dev_dbg(&pdev->dev, "probe: allocated struct kp2000_device @ %p\n",
		pcard);

	/*
	 * Step 2: Initialize trivial pcard elements
	 */
	err = ida_simple_get(&card_num_ida, 1, INT_MAX, GFP_KERNEL);
	if (err < 0) {
		dev_err(&pdev->dev, "probe: failed to get card number (%d)\n",
			err);
		goto out2;
	}
	pcard->card_num = err;
	scnprintf(pcard->name, 16, "kpcard%u", pcard->card_num);

	mutex_init(&pcard->sem);
	mutex_lock(&pcard->sem);

	pcard->pdev = pdev;
	pci_set_drvdata(pdev, pcard);

	/*
	 * Step 3: Enable PCI device
	 */
	err = pci_enable_device(pcard->pdev);
	if (err) {
		dev_err(&pcard->pdev->dev,
			"probe: failed to enable PCIE2000 PCIe device (%d)\n",
			err);
		goto out3;
	}

	/*
	 * Step 4: Setup the Register BAR
	 */
	reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR);
	reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR);

	pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE);
	if (NULL == pcard->regs_bar_base) {
		dev_err(&pcard->pdev->dev,
			"probe: REG_BAR could not remap memory to virtual space\n");
		err = -ENODEV;
		goto out4;
	}
	dev_dbg(&pcard->pdev->dev,
		"probe: REG_BAR virt hardware address start [%p]\n",
		pcard->regs_bar_base);

	err = pci_request_region(pcard->pdev, REG_BAR, KP_DRIVER_NAME_KP2000);
	if (err) {
		iounmap(pcard->regs_bar_base);
		dev_err(&pcard->pdev->dev,
			"probe: failed to acquire PCI region (%d)\n", err);
		err = -ENODEV;
		goto out4;
	}

	pcard->regs_base_resource.start = reg_bar_phys_addr;
	pcard->regs_base_resource.end = reg_bar_phys_addr +
					reg_bar_phys_len - 1;
	pcard->regs_base_resource.flags = IORESOURCE_MEM;

	/*
	 * Step 5: Setup the DMA BAR
	 */
	dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR);
	dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);

	pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr,
					      dma_bar_phys_len);
	if (NULL == pcard->dma_bar_base) {
		dev_err(&pcard->pdev->dev,
			"probe: DMA_BAR could not remap memory to virtual space\n");
		err = -ENODEV;
		goto out5;
	}
	dev_dbg(&pcard->pdev->dev,
		"probe: DMA_BAR virt hardware address start [%p]\n",
		pcard->dma_bar_base);

	pcard->dma_common_regs = pcard->dma_bar_base + KPC_DMA_COMMON_OFFSET;

	err = pci_request_region(pcard->pdev, DMA_BAR, "kp2000_pcie");
	if (err) {
		iounmap(pcard->dma_bar_base);
		dev_err(&pcard->pdev->dev,
			"probe: failed to acquire PCI region (%d)\n", err);
		err = -ENODEV;
		goto out5;
	}

	pcard->dma_base_resource.start = dma_bar_phys_addr;
	pcard->dma_base_resource.end = dma_bar_phys_addr +
				       dma_bar_phys_len - 1;
	pcard->dma_base_resource.flags = IORESOURCE_MEM;

	/*
	 * Step 6: System Regs
	 */
	pcard->sysinfo_regs_base = pcard->regs_bar_base;
	err = read_system_regs(pcard);
	if (err)
		goto out6;

	// Disable all "user" interrupts because they're not used yet.
	writeq(0xFFFFFFFFFFFFFFFF,
	       pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);

	/*
	 * Step 7: Configure PCI thingies
	 */
	// let the card master PCIe
	pci_set_master(pcard->pdev);
	// enable IO and mem if not already done
	pci_read_config_word(pcard->pdev, PCI_COMMAND, &regval);
	regval |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	pci_write_config_word(pcard->pdev, PCI_COMMAND, regval);

	// Clear relaxed ordering bit
	pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_RELAX_EN, 0);

	// Set Max_Payload_Size and Max_Read_Request_Size
	regval = (0x0) << 5; // Max_Payload_Size = 128 B
	pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_PAYLOAD, regval);
	regval = (0x0) << 12; // Max_Read_Request_Size = 128 B
	pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, regval);

	// Enable error reporting for: Correctable Errors, Non-Fatal Errors,
	// Fatal Errors, Unsupported Requests
	pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL, 0,
					   PCI_EXP_DEVCTL_CERE |
					   PCI_EXP_DEVCTL_NFERE |
					   PCI_EXP_DEVCTL_FERE |
					   PCI_EXP_DEVCTL_URRE);

	err = dma_set_mask(PCARD_TO_DEV(pcard), DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pcard->pdev->dev, "CANNOT use DMA mask %0llx\n",
			DMA_BIT_MASK(64));
		goto out7;
	}
	dev_dbg(&pcard->pdev->dev, "Using DMA mask %0llx\n",
		dma_get_mask(PCARD_TO_DEV(pcard)));

	/*
	 * Step 8: Configure IRQs
	 */
	err = pci_enable_msi(pcard->pdev);
	if (err < 0)
		goto out8a;

	rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
			 pcard->name, pcard);
	if (rv) {
		dev_err(&pcard->pdev->dev,
			"kp2000_pcie_probe: failed to request_irq: %d\n", rv);
		goto out8b;
	}

	/*
	 * Step 9: Setup sysfs attributes
	 */
	err = sysfs_create_files(&pdev->dev.kobj, kp_attr_list);
	if (err) {
		dev_err(&pdev->dev, "Failed to add sysfs files: %d\n", err);
		goto out9;
	}

	/*
	 * Step 10: Setup misc device
	 */
	pcard->miscdev.minor = MISC_DYNAMIC_MINOR;
	pcard->miscdev.fops = &kp2000_fops;
	pcard->miscdev.parent = &pcard->pdev->dev;
	pcard->miscdev.name = pcard->name;

	err = misc_register(&pcard->miscdev);
	if (err) {
		dev_err(&pcard->pdev->dev,
			"kp2000_pcie_probe: misc_register failed: %d\n", err);
		goto out10;
	}

	/*
	 * Step 11: Probe cores
	 */
	err = kp2000_probe_cores(pcard);
	if (err)
		goto out11;

	/*
	 * Step 12: Enable IRQs in HW
	 */
	writel(KPC_DMA_CARD_IRQ_ENABLE | KPC_DMA_CARD_USER_INTERRUPT_MODE,
	       pcard->dma_common_regs);

	dev_dbg(&pcard->pdev->dev, "kp2000_pcie_probe() complete!\n");
	mutex_unlock(&pcard->sem);
	return 0;

out11:
	misc_deregister(&pcard->miscdev);
out10:
	sysfs_remove_files(&pdev->dev.kobj, kp_attr_list);
out9:
	free_irq(pcard->pdev->irq, pcard);
out8b:
	pci_disable_msi(pcard->pdev);
out8a:
out7:
out6:
	iounmap(pcard->dma_bar_base);
	pci_release_region(pdev, DMA_BAR);
	pcard->dma_bar_base = NULL;
out5:
	iounmap(pcard->regs_bar_base);
	pci_release_region(pdev, REG_BAR);
	pcard->regs_bar_base = NULL;
out4:
	pci_disable_device(pcard->pdev);
out3:
	mutex_unlock(&pcard->sem);
	ida_simple_remove(&card_num_ida, pcard->card_num);
out2:
	kfree(pcard);
	return err;
}