/*
 * kp2000_pcie_remove() - PCI .remove callback: tear down one kp2000 card.
 *
 * Undoes probe in reverse order while holding the card mutex: cores, MFD
 * children, misc device, sysfs attrs, IRQ/MSI, BAR mappings, then the PCI
 * device itself.  The card struct (which embeds the mutex) is freed only
 * after the unlock.
 */
static void kp2000_pcie_remove(struct pci_dev *pdev)
{
	struct kp2000_device *pcard = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "kp2000_pcie_remove(pdev=%p)\n", pdev);

	/* Nothing to do if probe never attached a card. */
	if (pcard == NULL)
		return;

	/* Serialize against any in-flight users of the card. */
	mutex_lock(&pcard->sem);
	kp2000_remove_cores(pcard);
	mfd_remove_devices(PCARD_TO_DEV(pcard));
	misc_deregister(&pcard->miscdev);
	sysfs_remove_files(&(pdev->dev.kobj), kp_attr_list);
	free_irq(pcard->pdev->irq, pcard);
	pci_disable_msi(pcard->pdev);
	/* Unmap and release each BAR only if it was actually mapped. */
	if (pcard->dma_bar_base != NULL) {
		iounmap(pcard->dma_bar_base);
		pci_release_region(pdev, DMA_BAR);
		pcard->dma_bar_base = NULL;
	}
	if (pcard->regs_bar_base != NULL) {
		iounmap(pcard->regs_bar_base);
		pci_release_region(pdev, REG_BAR);
		pcard->regs_bar_base = NULL;
	}
	pci_disable_device(pcard->pdev);
	pci_set_drvdata(pdev, NULL);
	mutex_unlock(&pcard->sem);
	/* Return the card number and free the card last — the mutex lives
	 * inside pcard, so kfree() must come after mutex_unlock(). */
	ida_simple_remove(&card_num_ida, pcard->card_num);
	kfree(pcard);
}
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev) { if (pci_request_region(dev, 2, "priv 2 regs")) goto err1; if (pci_request_region(dev, 0, "priv 1 regs")) goto err2; pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx", p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev)); if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) goto err3; if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) goto err4; return 0; err4: iounmap(adapter->p1_mmio); adapter->p1_mmio = NULL; err3: pci_release_region(dev, 0); err2: pci_release_region(dev, 2); err1: return -ENOMEM; }
/*
 * intel_mid_i2s_remove() - PCI .remove callback for the Intel MID I2S/SSP.
 *
 * Tears down what probe set up: runtime-PM state, an open port if the
 * user forgot to close it, the DMA controller reference, the SSP itself,
 * IRQ, MMIO mapping, PCI regions, and finally the driver state.
 */
static void __devexit intel_mid_i2s_remove(struct pci_dev *pdev)
{
	struct intel_mid_i2s_hdl *drv_data;

	/* Keep the device awake and forbid runtime suspend while we touch
	 * its registers below. */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	drv_data = pci_get_drvdata(pdev);
	if (!drv_data) {
		dev_err(&pdev->dev, "no drv_data in pci device to remove!\n");
		goto leave;
	}
	/* Port still open: warn, then force-close so the resources held by
	 * the open path are returned before we free them below. */
	if (test_bit(I2S_PORT_OPENED, &drv_data->flags)) {
		dev_warn(&pdev->dev, "Not closed before removing pci_dev!\n");
		intel_mid_i2s_close(drv_data);
	}
	pci_set_drvdata(pdev, NULL);
	/* Stop DMA is already done during close() */
	/* Drop the reference on the DMA controller taken at probe time. */
	pci_dev_put(drv_data->dmac1);
	/* Disable the SSP at the peripheral and SOC level */
	write_SSCR0(0, drv_data->ioaddr);
	free_irq(drv_data->irq, drv_data);
	iounmap(drv_data->ioaddr);
	pci_release_region(pdev, MRST_SSP_BAR);
	pci_release_region(pdev, MRST_LPE_BAR);
	pci_disable_device(pdev);
	kfree(drv_data);
leave:
	return;
}
/*
 * intel_mcu_remove() - PCI .remove callback: unmap the two DDR windows
 * and release BARs 0 and 1.
 *
 * Fix: guard against a NULL drvdata — the original dereferenced the
 * result of dev_get_drvdata() unconditionally, which would oops if
 * probe never set it.
 */
static void intel_mcu_remove(struct pci_dev *pdev)
{
	struct mcu *mcu;

	mcu = dev_get_drvdata(&pdev->dev);
	if (!mcu)
		return;	/* probe never attached state; nothing to undo */

	iounmap((void __iomem *)mcu->ddr[0]);
	iounmap((void __iomem *)mcu->ddr[1]);
	pci_release_region(pdev, 0);
	pci_release_region(pdev, 1);
	/* Drop the device reference taken at probe time. */
	pci_dev_put(pdev);
}
/*
 * asd_unmap_memio() - undo asd_map_memio(): unmap and release both
 * memory BARs, second one (BAR 2 / io_handle[1]) first.
 */
static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha)
{
	struct pci_dev *pcidev = asd_ha->pcidev;

	iounmap(asd_ha->io_handle[1].addr);
	pci_release_region(pcidev, 2);

	iounmap(asd_ha->io_handle[0].addr);
	pci_release_region(pcidev, 0);
}
/*
 * asd_map_memio() - request and ioremap the two memory BARs (0 and 2).
 *
 * io_handle[0] tracks MBAR0 (PCI BAR 0) and io_handle[1] tracks MBAR1
 * (PCI BAR 2).  On failure, whatever was mapped/requested so far is
 * undone via the goto labels.
 *
 * Returns 0 on success or a negative errno.
 */
static int asd_map_memio(struct asd_ha_struct *asd_ha)
{
	int err, i;
	struct asd_ha_addrspace *io_handle;

	asd_ha->iospace = 0;
	/* Only BARs 0 and 2 are used, hence the step of 2; i==0?0:1 maps
	 * the BAR number onto the io_handle[] index. */
	for (i = 0; i < 3; i += 2) {
		io_handle = &asd_ha->io_handle[i==0?0:1];
		io_handle->start = pci_resource_start(asd_ha->pcidev, i);
		io_handle->len = pci_resource_len(asd_ha->pcidev, i);
		io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
		err = -ENODEV;
		if (!io_handle->start || !io_handle->len) {
			asd_printk("MBAR%d start or length for %s is 0.\n",
				   i==0?0:1, pci_name(asd_ha->pcidev));
			goto Err;
		}
		err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
		if (err) {
			asd_printk("couldn't reserve memory region for %s\n",
				   pci_name(asd_ha->pcidev));
			goto Err;
		}
		/* Honour the cacheability advertised by the resource. */
		if (io_handle->flags & IORESOURCE_CACHEABLE)
			io_handle->addr = ioremap(io_handle->start,
						  io_handle->len);
		else
			io_handle->addr = ioremap_nocache(io_handle->start,
							  io_handle->len);
		if (!io_handle->addr) {
			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
				   pci_name(asd_ha->pcidev));
			err = -ENOMEM;
			goto Err_unreq;
		}
	}
	return 0;
Err_unreq:
	/* The region for the current BAR i was requested but not mapped. */
	pci_release_region(asd_ha->pcidev, i);
Err:
	/* If the second iteration failed (i > 0), BAR 0 is still mapped
	 * and requested from the first iteration: undo it too. */
	if (i > 0) {
		io_handle = &asd_ha->io_handle[0];
		iounmap(io_handle->addr);
		pci_release_region(asd_ha->pcidev, 0);
	}
	return err;
}
/*************************************************************************/ /*!
@Function       OSPCIReleaseDev
@Description    Release a PCI device that is no longer being used
@Input          hPVRPCI                 PCI device handle
@Return         PVRSRV_ERROR            Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
	struct pci_dev *psPCIDev = psPVRPCI->psPCIDev;
	int iBar;

	/* Hand back every PCI region this handle still owns */
	for (iBar = 0; iBar < DEVICE_COUNT_RESOURCE; iBar++)
	{
		if (!psPVRPCI->abPCIResourceInUse[iBar])
			continue;

		pci_release_region(psPCIDev, iBar);
		psPVRPCI->abPCIResourceInUse[iBar] = IMG_FALSE;
	}

#if defined(CONFIG_PCI_MSI)
	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */
	{
		pci_disable_msi(psPCIDev);
	}
#endif

	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */
	{
		pci_clear_master(psPCIDev);
	}

	pci_disable_device(psPCIDev);

	kfree((IMG_VOID *)psPVRPCI); /*not nulling pointer, copy on stack*/

	return PVRSRV_OK;
}
/*
 * wil_pcie_remove() - PCI .remove callback for the wil6210 card.
 *
 * The CSR mapping is snapshotted first because wil_if_free() at the end
 * releases @wil — nothing may dereference wil after that call.
 */
static void wil_pcie_remove(struct pci_dev *pdev)
{
	struct wil6210_priv *wil = pci_get_drvdata(pdev);
	/* Cache the CSR pointer up front; it is consumed by pci_iounmap()
	 * later in the teardown sequence. */
	void __iomem *csr = wil->csr;

	wil_dbg_misc(wil, "%s()\n", __func__);

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&wil->pm_notify);
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */

	wil6210_debugfs_remove(wil);
	/* wdev teardown must run under the RTNL lock. */
	rtnl_lock();
	wil_p2p_wdev_free(wil);
	rtnl_unlock();
	wil_if_remove(wil);
	wil_if_pcie_disable(wil);
	pci_iounmap(pdev, csr);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
	if (wil->platform_ops.uninit)
		wil->platform_ops.uninit(wil->platform_handle);
	/* Frees @wil — must stay last. */
	wil_if_free(wil);
}
static void tpci200_unregister(struct tpci200_board *tpci200) { free_irq(tpci200->info->pdev->irq, (void *) tpci200); pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); pci_disable_device(tpci200->info->pdev); pci_dev_put(tpci200->info->pdev); }
/*
 * tc86c001_init_one() - PCI probe for the TC86C001 IDE controller.
 *
 * Enables the device, reserves BAR 5 (system control registers) and hands
 * off to the generic IDE PCI init.  Errors unwind via the goto ladder.
 *
 * Fix: the "already in use" printk was missing its terminating newline.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tc86c001_init_one(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto out;

	rc = pci_request_region(dev, 5, DRV_NAME);
	if (rc) {
		printk(KERN_ERR DRV_NAME ": system control regs already in use\n");
		goto out_disable;
	}

	rc = ide_pci_init_one(dev, &tc86c001_chipset, NULL);
	if (rc)
		goto out_release;

	goto out;

out_release:
	pci_release_region(dev, 5);
out_disable:
	pci_disable_device(dev);
out:
	return rc;
}
/* the PCI probing function */
/*
 * virtio_pci_legacy_probe() - set up a legacy (pre-1.0) virtio PCI device.
 *
 * Validates the device ID range and ABI revision, configures DMA masks,
 * reserves and maps BAR 0, and wires up the legacy config ops.
 *
 * Returns 0 on success or a negative errno.
 */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int rc;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	/* Try 64-bit DMA first; fall back to 32-bit if that fails.  Note
	 * that when the 64-bit mask succeeds, the result of the coherent
	 * mask call below is deliberately ignored — only rc from the mask
	 * calls feeds the warning. */
	rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
	} else {
		/*
		 * The virtio ring base address is expressed as a 32-bit PFN,
		 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
		 */
		dma_set_coherent_mask(&pci_dev->dev,
				      DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
	}

	if (rc)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
	if (rc)
		return rc;

	rc = -ENOMEM;
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!vp_dev->ioaddr)
		goto err_iomap;

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id.  this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	vp_dev->vdev.config = &virtio_pci_config_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_iomap:
	pci_release_region(pci_dev, 0);
	return rc;
}
/*
 * virtio_pci_legacy_remove() - undo virtio_pci_legacy_probe():
 * unmap BAR 0, then release it.
 */
void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pdev = vp_dev->pci_dev;

	pci_iounmap(pdev, vp_dev->ioaddr);
	pci_release_region(pdev, 0);
}
/*
 * ath_pci_cleanup() - unmap registers and release the PCI device.
 *
 * Fix: release the BAR 0 region *before* disabling the device — the
 * original disabled the device first, which is the reverse of the
 * conventional PCI teardown order used by every other cleanup path in
 * this file.
 */
static void ath_pci_cleanup(struct ath_softc *sc)
{
	struct pci_dev *pdev = to_pci_dev(sc->dev);

	pci_iounmap(pdev, sc->mem);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
}
/*
 * piix4_poweroff_remove() - undo piix4_poweroff probe: clear the module
 * device pointer, drop our pm_power_off hook (only if it is still ours),
 * and release the PM I/O region.
 */
static void piix4_poweroff_remove(struct pci_dev *dev)
{
	pm_dev = NULL;

	if (pm_power_off == piix4_poweroff)
		pm_power_off = NULL;

	pci_release_region(dev, piix4_pm_io_region);
}
static void test_pci_remove(struct pci_dev *pdev) { pci_free_consistent(dev_data->pdev, TEST_CDMA_BUFFER_SIZE, dev_data->cdma_buffer, dev_data->cdma_addr); cdev_del(dev_data->cdev); unregister_chrdev_region(test_pci_devt, test_pci_devs_max); free_irq(dev_data->pdev->irq, dev_data->pdev); iounmap(dev_data->mmio_addr); pci_release_region(dev_data->pdev, BAR_MMIO); pci_release_region(dev_data->pdev, BAR_PIO); pci_disable_device(dev_data->pdev); tprintk("%s driver (major %d) unloaded\n", DRIVER_TEST_NAME, test_pci_major); }
/*
 * cxl_remove_adapter() - tear down a CXL adapter: sysfs/debugfs entries,
 * PSL error IRQ, register mappings, adapter number, the device itself,
 * and finally the PCI BARs and device.
 */
static void cxl_remove_adapter(struct cxl *adapter)
{
	/* Save the parent pci_dev first — device_unregister() below drops
	 * a reference to the adapter device, after which @adapter should
	 * not be touched (presumably freed via its release path — see
	 * the device core; hence pdev is cached here). */
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	pr_devel("cxl_release_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);
	cxl_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);
	cxl_remove_adapter_nr(adapter);

	device_unregister(&adapter->dev);

	/* BARs 0 and 2 were requested by cxl_map_adapter_regs(). */
	pci_release_region(pdev, 0);
	pci_release_region(pdev, 2);
	pci_disable_device(pdev);
}
/*
 * cs5535_gpio_probe() - set up the CS5535 GPIO device.
 *
 * Enables I/O access, reserves the GPIO BAR, records the base address in
 * the driver-global chip struct, applies the pin mask, and registers the
 * gpio_chip.  Returns 0 on success or a negative errno.
 */
static int __init cs5535_gpio_probe(struct pci_dev *pdev,
		const struct pci_device_id *pci_id)
{
	int err;
	ulong mask_orig = mask;

	/* There are two ways to get the GPIO base address; one is by
	 * fetching it from MSR_LBAR_GPIO, the other is by reading the
	 * PCI BAR info.  The latter method is easier (especially across
	 * different architectures), so we'll stick with that for now.  If
	 * it turns out to be unreliable in the face of crappy BIOSes, we
	 * can always go back to using MSRs.. */

	err = pci_enable_device_io(pdev);
	if (err) {
		dev_err(&pdev->dev, "can't enable device IO\n");
		goto done;
	}

	err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
		goto done;
	}

	/* set up the driver-specific struct */
	cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
	cs5535_gpio_chip.pdev = pdev;
	spin_lock_init(&cs5535_gpio_chip.lock);

	dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
			(unsigned long long) cs5535_gpio_chip.base);

	/* mask out reserved pins */
	mask &= 0x1F7FFFFF;

	/* do not allow pin 28, Power Button, as there's special handling
	 * in the PMC needed. (note 12, p. 48) */
	mask &= ~(1 << 28);

	if (mask_orig != mask)
		dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n",
				mask_orig, mask);

	/* finally, register with the generic GPIO API */
	err = gpiochip_add(&cs5535_gpio_chip.chip);
	if (err)
		goto release_region;

	dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
	return 0;

release_region:
	/* gpiochip registration failed: give the BAR back. */
	pci_release_region(pdev, GPIO_BAR);
done:
	return err;
}
static void rtbt_pci_remove(struct pci_dev *pdev) { struct hci_dev *hci_dev = (struct hci_dev *)pci_get_drvdata(pdev); struct rtbt_os_ctrl *os_ctrl; struct rtbt_dev_ops *dev_ops; void __iomem *csr_addr; if (hci_dev == NULL){ printk("%s(): pci_get_drvdata failed!\n", __FUNCTION__); return; } os_ctrl = (struct rtbt_os_ctrl *) hci_get_drvdata(hci_dev); if (os_ctrl == NULL) { printk("%s(): hci_dev->driver_data is NULL!\n", __FUNCTION__); return; } csr_addr = os_ctrl->if_ops.pci_ops.csr_addr; printk("%s():csr_addr=0x%lx!\n", __FUNCTION__, (unsigned long)os_ctrl->if_ops.pci_ops.csr_addr); #if 0 rtbt_hps_iface_detach(os_ctrl); #endif rtbt_hps_iface_deinit(RAL_INF_PCI, pdev, os_ctrl); rtbth_us_deinit(os_ctrl->dev_ctrl); if (os_ctrl->dev_ops) { dev_ops = os_ctrl->dev_ops; if (dev_ops->dev_resource_deinit) dev_ops->dev_resource_deinit(os_ctrl); if (dev_ops->dev_ctrl_deinit) dev_ops->dev_ctrl_deinit(os_ctrl); } // rtbth_us_deinit(os_ctrl->dev_ctrl); pci_set_drvdata(pdev, NULL); if (csr_addr != NULL) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) pci_iounmap(pdev, csr_addr); #else iounmap(csr_addr); #endif } pci_release_region(pdev, 0); pci_disable_device(pdev); printk("Exit from rtbt_pci_remove!\n"); }
/*************************************************************************/ /*!
@Function       OSPCIAddrRangeFunc
@Description    Internal support function for various address range related
                functions
@Input          eFunc                   Function to perform
@Input          hPVRPCI                 PCI device handle
@Input          ui32Index               Address range index
@Return         IMG_UINT32              Function dependent value
*/ /**************************************************************************/
static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
				     PVRSRV_PCI_DEV_HANDLE hPVRPCI,
				     IMG_UINT32 ui32Index)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;

	/* Guard against indexing past the resource bookkeeping arrays. */
	if (ui32Index >= DEVICE_COUNT_RESOURCE)
	{
		printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
		return 0;
	}

	switch (eFunc)
	{
		case HOST_PCI_ADDR_RANGE_FUNC_LEN:
		{
			return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
		}
		case HOST_PCI_ADDR_RANGE_FUNC_START:
		{
			return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
		}
		case HOST_PCI_ADDR_RANGE_FUNC_END:
		{
			return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
		}
		case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
		{
			int err = pci_request_region(psPVRPCI->psPCIDev,
						     (IMG_INT)ui32Index,
						     PVRSRV_MODNAME);
			if (err != 0)
			{
				/* Fix: message previously read
				 * "pci_request_region_failed" (typo). */
				printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region failed (%d)", err);
				return 0;
			}
			psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
			return 1;
		}
		case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
		{
			/* Only release regions this handle actually owns. */
			if (psPVRPCI->abPCIResourceInUse[ui32Index])
			{
				pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
				psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
			}
			return 1;
		}
		default:
		{
			printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
			break;
		}
	}

	return 0;
}
/*
 * spi_pci_remove() - PCI .remove callback for the DesignWare SPI bridge.
 *
 * Detaches drvdata first, removes the SPI host, then unmaps and releases
 * BAR 0 before freeing the driver state and disabling the device.
 */
static void __devexit spi_pci_remove(struct pci_dev *pdev)
{
	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	dw_spi_remove_host(&dwpci->dws);
	iounmap(dwpci->dws.regs);
	pci_release_region(pdev, 0);

	pci_disable_device(pdev);
	kfree(dwpci);
}
/*
 * chipsfb_remove() - unregister the framebuffer and release BAR 0.
 * A NULL screen_base means probe never completed, so there is nothing
 * to undo.
 */
static void chipsfb_remove(struct pci_dev *dp)
{
	struct fb_info *p = pci_get_drvdata(dp);

	if (!p->screen_base)
		return;

	unregister_framebuffer(p);
	iounmap(p->screen_base);
	p->screen_base = NULL;
	pci_release_region(dp, 0);
}
/*
 * cs5535_gpio_remove() - module exit path: unregister the gpio_chip and
 * give the GPIO BAR back.
 */
static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
{
	int err;

	err = gpiochip_remove(&cs5535_gpio_chip.chip);
	if (err) {
		/* uhh? */
		/* NOTE(review): if gpiochip_remove() failed, the chip is
		 * presumably still registered, yet the BAR is released
		 * below anyway — confirm this is the intended behaviour. */
		dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
	}

	pci_release_region(pdev, GPIO_BAR);
}
/*
 * my_remove() - PCI .remove callback: unmap the BAR 0 mapping stored in
 * drvdata, release the region, and disable the device.
 *
 * Fixes: the log message said "probe:" in the remove path (wrong and
 * misleading); the declaration of `mem` followed a statement; `void*`
 * pointer style normalized to `void *`.
 */
void my_remove(struct pci_dev *pdev)
{
	void *mem = pci_get_drvdata(pdev);

	printk(KERN_INFO "remove: bus->number: %2.x pci_slot: %x pci_func: %x\n",
	       pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	iounmap(mem);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
}
/*
 * mlx4_remove_one() - PCI .remove callback for a ConnectX HCA.
 *
 * Unwinds the probe sequence: stop sensing, unregister, close every
 * port, tear down the resource tables (MCG/QP/SRQ/CQ, then EQ under
 * polling, MR/PD/UAR), close the HCA and command interface, and finally
 * release the PCI resources.
 */
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	/* drvdata may be absent if probe failed part-way. */
	if (dev) {
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		/* Ports are numbered from 1. */
		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		/* Switch to polling before tearing down the EQs, since
		 * interrupts are going away. */
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);

		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_free_eq_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		/* BARs 0 and 2 were requested at probe time. */
		pci_release_region(pdev, 2);
		pci_release_region(pdev, 0);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static void wil_pcie_remove(struct pci_dev *pdev) { struct wil6210_priv *wil = pci_get_drvdata(pdev); wil6210_debugfs_remove(wil); wil_if_pcie_disable(wil); wil_if_remove(wil); wil_if_free(wil); pci_iounmap(pdev, wil->csr); pci_release_region(pdev, 0); pci_disable_device(pdev); }
/*
 * intelce_wdt_remove() - PCI .remove callback for the Intel CE watchdog.
 * Stops the timer (unless nowayout), removes the misc device, and
 * returns the MMIO/BAR resources.
 */
static void __devexit intelce_wdt_remove(struct pci_dev *pdev)
{
	/* Make sure the watchdog cannot fire after we are gone. */
	if (!nowayout)
		intelce_wdt_stop();

	misc_deregister(&intelce_wdt_miscdev);

	iounmap(BASEADDR);
	pci_release_region(intelce_wdt_pci, 0);
	pci_disable_device(intelce_wdt_pci);
	intelce_wdt_pci = NULL;
}
/*
 * esb_remove() - PCI .remove callback for the ESB watchdog.
 * Stops the timer (unless nowayout), removes the misc device, and
 * returns the MMIO/BAR resources.
 */
static void esb_remove(struct pci_dev *pdev)
{
	/* Make sure the watchdog cannot fire after we are gone. */
	if (!nowayout)
		esb_timer_stop();

	misc_deregister(&esb_miscdev);

	iounmap(BASEADDR);
	pci_release_region(esb_pci, 0);
	pci_disable_device(esb_pci);
	esb_pci = NULL;
}
/*
 * vmw_driver_unload() - DRM unload callback for the vmwgfx driver.
 *
 * Tears down the driver state in roughly reverse probe order: PM
 * notifier, command bounce buffer, fbdev, KMS/overlay, TTM memory
 * managers and device, fence manager, IRQ, PCI regions, MMIO mapping,
 * and finally the per-device private struct.  Always returns 0.
 */
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	/* fbdev holds a FIFO resource reference; drop it with the fb. */
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	/* MOB cleanup must happen between the early and late device
	 * release steps. */
	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	/* Stealth mode only grabbed BAR 2; normal mode took all regions. */
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}
/*
 * dt3155_remove() - PCI .remove callback for the DT3155 frame grabber.
 * Unregisters the video node, releases the IRQ and the vb2 queue, then
 * returns the PCI resources.
 */
static void dt3155_remove(struct pci_dev *pdev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
	struct dt3155_priv *priv =
		container_of(v4l2_dev, struct dt3155_priv, v4l2_dev);

	video_unregister_device(&priv->vdev);
	free_irq(priv->pdev->irq, priv);
	vb2_queue_release(&priv->vidq);
	v4l2_device_unregister(&priv->v4l2_dev);

	pci_iounmap(pdev, priv->regs);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
}
static void altera_cvp_remove(struct pci_dev *pdev) { struct fpga_manager *mgr = pci_get_drvdata(pdev); struct altera_cvp_conf *conf = mgr->priv; u16 cmd; driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); fpga_mgr_unregister(&pdev->dev); pci_iounmap(pdev, conf->map); pci_release_region(pdev, CVP_BAR); pci_read_config_word(pdev, PCI_COMMAND, &cmd); cmd &= ~PCI_COMMAND_MEMORY; pci_write_config_word(pdev, PCI_COMMAND, cmd); }