static int wil6210_suspend(struct device *dev, bool is_runtime)
{
	int rc = 0;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct wil6210_priv *wil = pci_get_drvdata(pdev);

	wil_dbg_pm(wil, "%s(%s)\n", __func__,
		   is_runtime ? "runtime" : "system");

	rc = wil_can_suspend(wil, is_runtime);
	if (rc)
		goto out;

	rc = wil_suspend(wil, is_runtime);
	if (rc)
		goto out;

	/* TODO: how do I bring card in low power state? */

	/* disable bus mastering */
	pci_clear_master(pdev);
	/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */

out:
	return rc;
}
void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		return;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	pci_disable_device(dev->pdev);
	debugfs_remove(priv->dbg_root);
}
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	u32 i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	psp_gpu_reset(adev);

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
/* called when PCI device is removed or module unloaded */
static void timing_dev_remove(struct pci_dev *dev)
{
#if DEBUG != 0
	printk(KERN_DEBUG "timing_dev_remove() entry\n");
#endif

	/* stop the DMA thread before tearing down the resources it uses;
	 * kthread_stop() wakes the thread and waits for it to exit, so a
	 * separate wake_up_process() afterwards is unnecessary (and would
	 * touch a task that may already be gone)
	 */
	if (dma_kthread)
		kthread_stop(dma_kthread);

	/* release resources */
	free_irq(irq_line, timing_card);
	pci_iounmap(dev, timing_card[0].base);
	pci_iounmap(dev, master_chip->base);
	pci_release_regions(dev);
	pci_clear_master(dev);
	pci_disable_device(dev);

#if DEBUG != 0
	printk(KERN_DEBUG "timing_dev_remove() exit success\n");
#endif
} /* end timing_dev_remove */
/*************************************************************************/ /*!
@Function       OSPCIReleaseDev
@Description    Release a PCI device that is no longer being used
@Input          hPVRPCI               PCI device handle
@Return         PVRSRV_ERROR          Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
	int i;

	/* Release all PCI regions that are currently in use */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
	{
		if (psPVRPCI->abPCIResourceInUse[i])
		{
			pci_release_region(psPVRPCI->psPCIDev, i);
			psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
		}
	}

#if defined(CONFIG_PCI_MSI)
	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)	/* PRQA S 3358 */ /* misuse of enums */
	{
		pci_disable_msi(psPVRPCI->psPCIDev);
	}
#endif

	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
	{
		pci_clear_master(psPVRPCI->psPCIDev);
	}

	pci_disable_device(psPVRPCI->psPCIDev);

	kfree((IMG_VOID *)psPVRPCI);
	/* not nulling pointer, copy on stack */

	return PVRSRV_OK;
}
static void bbswitch_off(void)
{
	if (is_card_disabled())
		return;

	// to prevent the system from possibly locking up, don't disable the device
	// if it's still in use by a driver (e.g. nouveau or nvidia)
	if (dis_dev->driver) {
		pr_warn("device %s is in use by driver '%s', refusing OFF\n",
			dev_name(&dis_dev->dev), dis_dev->driver->name);
		return;
	}

	pr_info("disabling discrete graphics\n");

	if (bbswitch_optimus_dsm()) {
		pr_warn("Optimus ACPI call failed, the device is not disabled\n");
		return;
	}

	pci_save_state(dis_dev);
	pci_clear_master(dis_dev);
	pci_disable_device(dis_dev);
	pci_set_power_state(dis_dev, PCI_D3cold);

	if (bbswitch_acpi_off())
		pr_warn("The discrete card could not be disabled by a _DSM call\n");
}
static void bbswitch_off(void)
{
	if (is_card_disabled())
		return;

	// to prevent the system from possibly locking up, don't disable the device
	// if it's still in use by a driver (e.g. nouveau or nvidia)
	if (dis_dev->driver) {
		printk(KERN_WARNING "bbswitch: device %s is in use by driver '%s', "
			"refusing OFF\n", dev_name(&dis_dev->dev),
			dis_dev->driver->name);
		return;
	}

	printk(KERN_INFO "bbswitch: disabling discrete graphics\n");

	if (dsm_type == DSM_TYPE_OPTIMUS && bbswitch_optimus_dsm()) {
		printk(KERN_WARNING "bbswitch: ACPI call failed, the device is not"
			" disabled\n");
		return;
	}

	pci_save_state(dis_dev);
	pci_clear_master(dis_dev);
	pci_disable_device(dis_dev);
	pci_set_power_state(dis_dev, PCI_D3hot);

	if (bbswitch_acpi_off())
		printk(KERN_WARNING "bbswitch: The discrete card could not be disabled"
			" by a _DSM call\n");
}
/* Bus ops */
static int wil_if_pcie_enable(struct wil6210_priv *wil)
{
	struct pci_dev *pdev = wil->pdev;
	int rc;

	pci_set_master(pdev);

	/*
	 * how many MSI interrupts to request?
	 */
	switch (use_msi) {
	case 3:
	case 1:
	case 0:
		break;
	default:
		wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
		use_msi = 1;
	}
	wil->n_msi = use_msi;
	if (wil->n_msi) {
		wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
		rc = pci_enable_msi_block(pdev, wil->n_msi);
		if (rc && (wil->n_msi == 3)) {
			wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
			wil->n_msi = 1;
			rc = pci_enable_msi_block(pdev, wil->n_msi);
		}
		if (rc) {
			wil_err(wil, "pci_enable_msi failed, use INTx\n");
			wil->n_msi = 0;
		}
	} else {
		wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
	}

	rc = wil6210_init_irq(wil, pdev->irq);
	if (rc)
		goto stop_master;

	/* need reset here to obtain MAC */
	rc = wil_reset(wil);
	if (debug_fw)
		rc = 0;
	if (rc)
		goto release_irq;

	return 0;

release_irq:
	wil6210_fini_irq(wil, pdev->irq);
	/* safe to call if no MSI */
	pci_disable_msi(pdev);
stop_master:
	pci_clear_master(pdev);
	return rc;
}
bool bm_pci_set_bus_master_enable(bm_pci_device_t *pci, bool enable)
{
	bool ret = pci->pdev->is_busmaster;

	if (enable)
		pci_set_master(pci->pdev);
	else
		pci_clear_master(pci->pdev);

	return ret;
}
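The helper above returns the previous bus-master state, so a caller can temporarily quiesce DMA and then put things back exactly as they were. A minimal sketch of such a caller, assuming only the bm_pci_device_t type from the snippet above; the wrapper itself is hypothetical:

static void bm_pci_with_dma_quiesced(bm_pci_device_t *pci,
				     void (*op)(bm_pci_device_t *))
{
	/* remember whether bus mastering was on before we touched it */
	bool was_master = bm_pci_set_bus_master_enable(pci, false);

	op(pci);	/* run the operation with DMA stopped */

	/* restore bus mastering only if it was enabled before */
	if (was_master)
		bm_pci_set_bus_master_enable(pci, true);
}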
/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto out_error;
	}

	pci_set_master(pdev);

	err = pci_request_regions(pdev, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request regions failed\n");
		goto out_disable;
	}

	mmio_base = pci_ioremap_bar(pdev, 0);
	if (!mmio_base) {
		dev_err(&pdev->dev, "memory map failed\n");
		err = -ENOMEM;
		goto out_release_regions;
	}

	err = ufshcd_set_dma_mask(pdev);
	if (err) {
		dev_err(&pdev->dev, "set dma mask failed\n");
		goto out_iounmap;
	}

	err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, hba);

	return 0;

out_iounmap:
	iounmap(mmio_base);
out_release_regions:
	pci_release_regions(pdev);
out_disable:
	pci_clear_master(pdev);
	pci_disable_device(pdev);
out_error:
	return err;
}
static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
/**
 * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
 *		data structure memory
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	disable_irq(pdev->irq);
	ufshcd_remove(hba);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}
static int wil_if_pcie_disable(struct wil6210_priv *wil)
{
	struct pci_dev *pdev = wil->pdev;

	pci_clear_master(pdev);
	/* disable and release IRQ */
	wil6210_fini_irq(wil, pdev->irq);
	/* safe to call if no MSI */
	pci_disable_msi(pdev);
	/* TODO: disable HW */

	return 0;
}
/* Bus ops */
static int wil_if_pcie_enable(struct wil6210_priv *wil)
{
	struct pci_dev *pdev = wil->pdev;
	int rc;
	/* on platforms with buggy ACPI, pdev->msi_enabled may be set to
	 * allow pci_enable_device to work. This indicates INTx was not routed
	 * and only MSI should be used
	 */
	int msi_only = pdev->msi_enabled;
	bool _use_msi = use_msi;

	wil_dbg_misc(wil, "%s()\n", __func__);

	pdev->msi_enabled = 0;

	pci_set_master(pdev);

	wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");

	if (use_msi && pci_enable_msi(pdev)) {
		wil_err(wil, "pci_enable_msi failed, use INTx\n");
		_use_msi = false;
	}

	if (!_use_msi && msi_only) {
		wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
		rc = -ENODEV;
		goto stop_master;
	}

	rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
	if (rc)
		goto stop_master;

	/* need reset here to obtain MAC */
	mutex_lock(&wil->mutex);
	rc = wil_reset(wil, false);
	mutex_unlock(&wil->mutex);
	if (rc)
		goto release_irq;

	return 0;

release_irq:
	wil6210_fini_irq(wil, pdev->irq);
	/* safe to call if no MSI */
	pci_disable_msi(pdev);
stop_master:
	pci_clear_master(pdev);
	return rc;
}
static void ec_bhf_remove(struct pci_dev *dev)
{
	struct net_device *net_dev = pci_get_drvdata(dev);
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	/* priv lives inside net_dev, so unmap before freeing it */
	pci_iounmap(dev, priv->dma_io);
	pci_iounmap(dev, priv->io);
	free_netdev(net_dev);

	pci_release_regions(dev);
	pci_clear_master(dev);
	pci_disable_device(dev);
}
static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);	/* regions were acquired above */
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
static void c_can_pci_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct c_can_priv *priv = netdev_priv(dev);
	/* priv lives inside dev; save the mapping before freeing it */
	void __iomem *addr = priv->base;

	unregister_c_can_dev(dev);
	free_c_can_dev(dev);

	pci_iounmap(pdev, addr);
	pci_disable_msi(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void __devexit nf10_remove(struct pci_dev *pdev)
{
	struct nf10_card *card;

	// free private data
	printk(KERN_INFO "nf10: releasing private memory\n");
	card = (struct nf10_card *)pci_get_drvdata(pdev);
	if (card) {
		nf10fops_remove(pdev, card);
		nf10iface_remove(pdev, card);

		if (card->cfg_addr)
			iounmap(card->cfg_addr);
		if (card->tx_dsc)
			iounmap(card->tx_dsc);
		if (card->rx_dsc)
			iounmap(card->rx_dsc);

		pci_free_consistent(pdev, card->tx_dne_mask + 1,
				    card->host_tx_dne_ptr, card->host_tx_dne_dma);
		pci_free_consistent(pdev, card->rx_dne_mask + 1,
				    card->host_rx_dne_ptr, card->host_rx_dne_dma);

		/* kfree(NULL) is a no-op, so no NULL checks needed */
		kfree(card->tx_bk_dma_addr);
		kfree(card->tx_bk_skb);
		kfree(card->tx_bk_size);
		kfree(card->tx_bk_port);
		kfree(card->rx_bk_dma_addr);
		kfree(card->rx_bk_skb);
		kfree(card->rx_bk_size);

		kfree(card);
	}
	pci_set_drvdata(pdev, NULL);

	// release memory
	printk(KERN_INFO "nf10: releasing mem region\n");
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	release_mem_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));

	// disabling device
	printk(KERN_INFO "nf10: disabling device\n");
	pci_disable_msi(pdev);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	atomic64_dec(&detected_cards);
}
static int wil6210_resume(struct device *dev, bool is_runtime)
{
	int rc = 0;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct wil6210_priv *wil = pci_get_drvdata(pdev);

	wil_dbg_pm(wil, "%s(%s)\n", __func__,
		   is_runtime ? "runtime" : "system");

	/* allow master */
	pci_set_master(pdev);

	rc = wil_resume(wil, is_runtime);
	if (rc)
		pci_clear_master(pdev);

	return rc;
}
static void bbswitch_off(void)
{
	if (is_card_disabled())
		return;

	// to prevent the system from possibly locking up, don't disable the device
	// if it's still in use by a driver (e.g. nouveau or nvidia)
	if (dis_dev->driver) {
		pr_warn("device %s is in use by driver '%s', refusing OFF\n",
			dev_name(&dis_dev->dev), dis_dev->driver->name);
		return;
	}

	pr_info("disabling discrete graphics\n");

	if (bbswitch_optimus_dsm()) {
		pr_warn("Optimus ACPI call failed, the device is not disabled\n");
		return;
	}

	pci_save_state(dis_dev);
	pci_clear_master(dis_dev);
	pci_disable_device(dis_dev);

	do {
		struct acpi_device *ad = NULL;
		int r;

		r = acpi_bus_get_device(dis_handle, &ad);
		if (r || !ad) {
			pr_warn("Cannot get ACPI device for PCI device\n");
			break;
		}
		if (ad->power.state == ACPI_STATE_UNKNOWN) {
			pr_debug("ACPI power state is unknown, forcing D0\n");
			ad->power.state = ACPI_STATE_D0;
		}
	} while (0);

	pci_set_power_state(dis_dev, PCI_D3cold);

	if (bbswitch_acpi_off())
		pr_warn("The discrete card could not be disabled by a _DSM call\n");
}
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
static int igbuio_pci_release(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;

	mutex_lock(&udev->lock);
	if (--udev->refcnt > 0) {
		mutex_unlock(&udev->lock);
		return 0;
	}

	/* disable interrupts */
	igbuio_pci_disable_interrupts(udev);

	/* stop the device from further DMA */
	pci_clear_master(dev);

	mutex_unlock(&udev->lock);
	return 0;
}
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	struct xen_pcibk_dev_data *dev_data;
	int err;
	u16 val;
	struct pci_cmd_info *cmd = data;

	dev_data = pci_get_drvdata(dev);
	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
			       pci_name(dev));
		err = pci_enable_device(dev);
		if (err)
			return err;
		if (dev_data)
			dev_data->enable_intx = 1;
	} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
			       pci_name(dev));
		pci_disable_device(dev);
		if (dev_data)
			dev_data->enable_intx = 0;
	}

	if (!dev->is_busmaster && is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
			       pci_name(dev));
		pci_set_master(dev);
	} else if (dev->is_busmaster && !is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: clear bus master\n",
			       pci_name(dev));
		pci_clear_master(dev);
	}

	if (!(cmd->val & PCI_COMMAND_INVALIDATE) &&
	    (value & PCI_COMMAND_INVALIDATE)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       DRV_NAME ": %s: enable memory-write-invalidate\n",
			       pci_name(dev));
		err = pci_set_mwi(dev);
		if (err) {
			pr_warn("%s: cannot enable memory-write-invalidate (%d)\n",
				pci_name(dev), err);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	} else if ((cmd->val & PCI_COMMAND_INVALIDATE) &&
		   !(value & PCI_COMMAND_INVALIDATE)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       DRV_NAME ": %s: disable memory-write-invalidate\n",
			       pci_name(dev));
		pci_clear_mwi(dev);
	}

	cmd->val = value;

	if (!permissive && (!dev_data || !dev_data->permissive))
		return 0;

	/* Only allow the guest to control certain bits. */
	err = pci_read_config_word(dev, offset, &val);
	if (err || val == value)
		return err;

	value &= PCI_COMMAND_GUEST;
	value |= val & ~PCI_COMMAND_GUEST;

	return pci_write_config_word(dev, offset, value);
}
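At the tail of command_write above, only the bits in PCI_COMMAND_GUEST are taken from the guest's write; every other bit keeps its current host value. A worked sketch of that merge in isolation (guest_mask stands in for PCI_COMMAND_GUEST, whose definition lives in the xen-pciback sources):

/* Merge guest- and host-owned command-register bits: guest bits where
 * the mask is set, host bits everywhere else. Mirrors the two lines
 *	value &= PCI_COMMAND_GUEST;
 *	value |= val & ~PCI_COMMAND_GUEST;
 * from the function above.
 */
static u16 merge_command_bits(u16 guest_val, u16 host_val, u16 guest_mask)
{
	return (guest_val & guest_mask) | (host_val & ~guest_mask);
}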
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct net_device *net_dev;
	struct ec_bhf_priv *priv;
	void __iomem *dma_io;
	void __iomem *io;
	int err = 0;

	err = pci_enable_device(dev);
	if (err)
		return err;

	pci_set_master(dev);

	err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		err = -EIO;
		goto err_disable_dev;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		goto err_disable_dev;
	}

	err = pci_request_regions(dev, "ec_bhf");
	if (err) {
		dev_err(&dev->dev, "Failed to request pci memory regions\n");
		goto err_disable_dev;
	}

	io = pci_iomap(dev, 0, 0);
	if (!io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 0\n");
		err = -EIO;
		goto err_release_regions;
	}

	dma_io = pci_iomap(dev, 2, 0);
	if (!dma_io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 2\n");
		err = -EIO;
		goto err_unmap;
	}

	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto err_unmap_dma_io;
	}

	pci_set_drvdata(dev, net_dev);
	SET_NETDEV_DEV(net_dev, &dev->dev);

	net_dev->features = 0;
	net_dev->flags |= IFF_NOARP;

	net_dev->netdev_ops = &ec_bhf_netdev_ops;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->io = io;
	priv->dma_io = dma_io;
	priv->dev = dev;

	err = ec_bhf_setup_offsets(priv);
	if (err < 0)
		goto err_free_net_dev;

	memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);

	err = register_netdev(net_dev);
	if (err < 0)
		goto err_free_net_dev;

	return 0;

err_free_net_dev:
	free_netdev(net_dev);
err_unmap_dma_io:
	pci_iounmap(dev, dma_io);
err_unmap:
	pci_iounmap(dev, io);
err_release_regions:
	pci_release_regions(dev);
err_disable_dev:
	pci_clear_master(dev);
	pci_disable_device(dev);

	return err;
}
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp, i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */

	/* Disable GFX parsing/prefetching */
	tmp = RREG32(mmCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
	WREG32(mmCP_ME_CNTL, tmp);

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	/* sdma0 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

	/* sdma1 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	//XXX
	//gfx_v8_0_rlc_stop(adev);

	udelay(50);

	/* disable mem access */
	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev))
		dev_warn(adev->dev, "Wait for MC idle timed out !\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
/* called when kernel matches PCI hardware to this module */
static int timing_dev_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int rc, i, j;

#if DEBUG != 0
	printk(KERN_DEBUG "timing_dev_probe() entry\n");
#endif

	/* enable the card */
	rc = pci_enable_device(dev);
	if (rc) {
		printk(KERN_ALERT "Failed to enable timing card (%d)\n", rc);
		return rc;
	}

	/* first, check if this card is ours... although */
	/* the vendor ID should have taken care of       */
	/* this, that ID was actually the bridge         */
	/* chip. We tell this from other cards using     */
	/* the sub_vendorID and sub_deviceID             */
	if ((ADLINK_VENDOR_ID != dev->subsystem_vendor) ||
	    (ADLINK_7300A_ID != dev->subsystem_device)) {
		printk(KERN_ALERT "Timing Driver rejected device: ");
		printk(KERN_ALERT "--sub_vendor ID = %x ", dev->subsystem_vendor);
		printk(KERN_ALERT "--sub_device ID = %x\n", dev->subsystem_device);
		pci_disable_device(dev);
		return -ENODEV; /* no such device error */
	}

	/* NOW WE ARE DEALING WITH THE TIMING CARD */

	/* simple initialization */
	dev_ptr = dev;
	dma_waiting = 0;
	dma_configured = 0;
	output_enabled = 0;

	/* enable DMA */
	pci_set_master(dev);
	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
		printk(KERN_ALERT "DMA NOT SUPPORTED: Aborting.");
		return -ENODEV; /* not the device we expected */
	} else
		printk(KERN_WARNING "Doing DMA with 32 bits\n");

	/* retrieve assigned interrupt line number */
	/* -> see linux/pci.h lines 255 & 256      */
	irq_line = dev->irq;

	/* request interrupt line number           */
	/* -> common practice says to put this     */
	/*    in device open but this could be     */
	/*    needed if the user closes the        */
	/*    device before DMA transfer done      */
	rc = request_irq(irq_line, timing_interrupt_handler, IRQF_SHARED,
			 "timing", timing_card);
	if (rc) {
		printk(KERN_ALERT "Failed to register irq %d\n", irq_line);
		return rc;
	}

	/* must claim proprietary access to memory region */
	/* mapped to the device                           */
	rc = pci_request_regions(dev, timing_driver.name);
	if (rc) {
		printk(KERN_ALERT "Memory region collision for TIMING card\n");
		goto request_fail;
	}

	/* retrieve base address of mmapped regions */
	/* -> common practice avoids reading        */
	/*    the PCI config space directly         */
	timing_card[0].len = pci_resource_len(dev, TIMING_BAR);
	timing_card[0].base = pci_iomap(dev, TIMING_BAR,
					/* +1 for maxlen */
					timing_card[0].len + 1);
	if (!timing_card[0].base) {
		printk(KERN_ALERT "Failed to find Timing base address\n");
		rc = -ENODEV; /* no device error */
		goto no_base;
	}

	/* already did this for timing_card[0] */
	i = 1;

	/* init other IO port vals */
	for (j = 1; j < TIMING_IOPORT_COUNT; j++, i++) {
		timing_card[i].len = timing_card[0].len;
		timing_card[i].base = timing_card[0].base + (i * TIMING_IOPORT_SIZE);
	}

	/* and onboard timer vals */
	for (j = 0; j < TIMING_8254_COUNT; j++, i++) {
		timing_card[i].len = timing_card[0].len;
		timing_card[i].base = timing_card[0].base + (i * TIMING_IOPORT_SIZE);
	}

	/* finally, set up for Bus Master (LCR) (PLX9080) */
	timing_card[i].len = pci_resource_len(dev, PLX9080_BAR);
	timing_card[i].base = pci_iomap(dev, PLX9080_BAR,
					/* +1 for maxlen */
					timing_card[i].len + 1);
	master_chip = &timing_card[i];

#if DEBUG != 0
	printk(KERN_DEBUG "timing_dev_probe() exit success\n");
#endif

	/* END NORMAL CONTROL FLOW */
	return 0;

	/* ERROR HANDLING */
no_base:
	pci_release_regions(dev);
request_fail:
	free_irq(irq_line, timing_card); /* irq was requested above */
	pci_clear_master(dev);
	pci_disable_device(dev);
	return rc;
} /* end timing_dev_probe */
/**
 * stmmac_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device id/id's.
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device or an error code (negative number) otherwise.
 */
static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
				      const struct pci_device_id *id)
{
	int ret = 0;
	void __iomem *addr = NULL;
	struct stmmac_priv *priv = NULL;
	int i;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		pr_err("%s: ERROR: failed to enable %s device\n", __func__,
		       pci_name(pdev));
		return ret;
	}
	if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
		pr_err("%s: ERROR: failed to get PCI region\n", __func__);
		ret = -ENODEV;
		goto err_out_req_reg_failed;
	}

	/* Get the base address of device */
	for (i = 0; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		addr = pci_iomap(pdev, i, 0);
		if (addr == NULL) {
			pr_err("%s: ERROR: cannot map register memory, aborting\n",
			       __func__);
			ret = -EIO;
			goto err_out_map_failed;
		}
		break;
	}
	pci_set_master(pdev);

	stmmac_default_data();

	priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
	if (!priv) {
		pr_err("%s: main driver probe failed\n", __func__);
		ret = -ENODEV;	/* don't return 0 from a failed probe */
		goto err_out;
	}
	priv->dev->irq = pdev->irq;
	priv->wol_irq = pdev->irq;

	pci_set_drvdata(pdev, priv->dev);

	pr_debug("STMMAC platform driver registration completed\n");

	return 0;

err_out:
	pci_clear_master(pdev);
err_out_map_failed:
	pci_release_regions(pdev);
err_out_req_reg_failed:
	pci_disable_device(pdev);

	return ret;
}
static int c_can_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
	struct c_can_priv *priv;
	struct net_device *dev;
	void __iomem *addr;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_device FAILED\n");
		goto out;
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev, "pci_request_regions FAILED\n");
		goto out_disable_device;
	}

	ret = pci_enable_msi(pdev);
	if (!ret) {
		dev_info(&pdev->dev, "MSI enabled\n");
		pci_set_master(pdev);
	}

	addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!addr) {
		dev_err(&pdev->dev,
			"device has no PCI memory resources, failing adapter\n");
		ret = -ENOMEM;
		goto out_release_regions;
	}

	/* allocate the c_can device */
	dev = alloc_c_can_dev();
	if (!dev) {
		ret = -ENOMEM;
		goto out_iounmap;
	}

	priv = netdev_priv(dev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;
	priv->base = addr;

	if (!c_can_pci_data->freq) {
		dev_err(&pdev->dev, "no clock frequency defined\n");
		ret = -ENODEV;
		goto out_free_c_can;
	} else {
		priv->can.clock.freq = c_can_pci_data->freq;
	}

	/* Configure CAN type */
	switch (c_can_pci_data->type) {
	case BOSCH_C_CAN:
		priv->regs = reg_map_c_can;
		break;
	case BOSCH_D_CAN:
		priv->regs = reg_map_d_can;
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}
	priv->type = c_can_pci_data->type;

	/* Configure access to registers */
	switch (c_can_pci_data->reg_align) {
	case C_CAN_REG_ALIGN_32:
		priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
		break;
	case C_CAN_REG_ALIGN_16:
		priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}

	ret = register_c_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto out_free_c_can;
	}

	dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
		KBUILD_MODNAME, priv->regs, dev->irq);

	return 0;

out_free_c_can:
	free_c_can_dev(dev);
out_iounmap:
	pci_iounmap(pdev, addr);
out_release_regions:
	pci_disable_msi(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->pdev = pdev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}
	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto err_unmap;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_core_enable_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_cmd_query_adapter(dev);
	if (err) {
		dev_err(&pdev->dev, "query adapter failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);

	return 0;

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

err_unmap:
	iounmap(dev->iseg);

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);

err_disable:
	pci_disable_device(dev->pdev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
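A pattern runs through the probe/remove pairs above: pci_set_master() comes after a successful pci_enable_device(), and pci_clear_master() comes before pci_disable_device() on both the remove and error paths, so the device cannot DMA into memory the driver is about to give up. A minimal, hypothetical skeleton of that ordering (driver name and labels are placeholders, not taken from any snippet above):

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc)
		goto err_disable;

	pci_set_master(pdev);	/* allow the device to initiate DMA */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return rc;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_clear_master(pdev);	/* stop DMA before resources go away */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}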