Example #1
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev_pdev(ndev);

	node = dev_to_node(&pdev->dev);

	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/* NOTE: Disable MSIX if msix count is less than 16 because of
	 * hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}
Example #2
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
	int err;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto end;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_txe_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev);

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt  */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	* For not wake-able HW runtime pm framework
	* can't be used on pci device level.
	* Use domain runtime pm callbacks instead.
	*/
	if (!pci_dev_run_wake(pdev))
		mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
Example #3
static int dabpci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
    int result;
    printk(KERN_INFO "init DAB PCIe.");
    printk(KERN_INFO "the vendor:0x%x.\nthe device:0x%x.\n",dev->vendor,dev->device);

    /* allocate the private device structure */
    DABDrv = kzalloc(sizeof(struct dab_dev), GFP_KERNEL);

    if(unlikely(!DABDrv))
        return -ENOMEM;
    
    DABDrv->pci_dev = dev;

    /* Enable PCI */
    result = pci_enable_device(dev);
    if(unlikely(result))
        goto free_dev;

    /* enable bus mastering and set the DMA mask */
    pci_set_master(dev);
    result = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
    if(unlikely(result))
    {
        printk(KERN_ERR "DABPCI: 32-bit PCI DMA addressing not supported!\n");
        goto disable_pci;
    }
    printk(KERN_INFO "DABPCI: set dma mask successfully!\n");

    /*request I/O resource */

    DABDrv->mmio_start = pci_resource_start(dev,0);
    DABDrv->mmio_end = pci_resource_end(dev,0);
    DABDrv->mmio_flags = pci_resource_flags(dev,0);
    DABDrv->mmio_len = pci_resource_len(dev,0);

    if(unlikely(!(DABDrv->mmio_flags & IORESOURCE_MEM)))
    {
        printk(KERN_ERR "DABPCI: BAR0 is not an MMIO resource!\n");
        result = -ENODEV;
        goto disable_pci;
    }


    result = pci_request_regions(dev, DAB_NAME);
    if(unlikely(result))
    {
        printk(KERN_ERR "DABPCI: Failed to request IO space!\n");
        goto disable_pci;
    }
    
    DABDrv->mmio_addr = ioremap(DABDrv->mmio_start, DABDrv->mmio_len);
    if(unlikely(!DABDrv->mmio_addr))
    {
        printk(KERN_ERR "DABPCI: Failed to do IO remap!\n");
        result = -EIO;
        goto release_regions;
    }

    /*enable MSI*/
    result = pci_enable_msi(dev);
    if(unlikely(result))
    {
        printk(KERN_ERR "DABPCI: Failed to enable msi!\n");
        goto unmap;
    }

    DABDrv->m_irq = dev->irq;
    /* IRQF_DISABLED is obsolete and was removed from the kernel; pass 0 instead */
    result = request_irq(DABDrv->m_irq, DABDrv_interrupt, 0, DAB_NAME, NULL);
    if(unlikely(result))
    {
        printk(KERN_ERR "DABPCI: Failed to request interrupt.\n");
        goto disable_msi;
    }

    dabpci_major = register_chrdev(0, DAB_CLASS_NAME, &DABDrv_fops);
    if(dabpci_major < 0)
    {
        printk(KERN_ERR "DABPCI: Failed to get the major device number: %d\n", dabpci_major);
        result = dabpci_major;
        goto free_irq_line;
    }
    printk(KERN_INFO "DABPCI: Got major device number %d\n", dabpci_major);

    return 0;

free_irq_line:
    free_irq(DABDrv->m_irq, NULL);
disable_msi:
    pci_disable_msi(dev);
unmap:
    iounmap(DABDrv->mmio_addr);
release_regions:
    pci_release_regions(dev);
disable_pci:
    pci_disable_device(dev);
free_dev:
    kfree(DABDrv);
    return result;
}
Example #4
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;


	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	* For not wake-able HW runtime pm framework
	* can't be used on pci device level.
	* Use domain runtime pm callbacks instead.
	*/
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
Example #5
static int rtsx_probe(struct pci_dev *pci,
		      const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	dev_dbg(&pci->dev, "Realtek PCI-E card reader detected\n");

	err = pcim_enable_device(pci);
	if (err < 0) {
		dev_err(&pci->dev, "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		dev_err(&pci->dev, "PCI request regions for %s failed!\n",
			CR_DRIVER_NAME);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		dev_err(&pci->dev, "Unable to allocate the scsi host\n");
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
	if (!dev->chip) {
		err = -ENOMEM;
		goto chip_alloc_fail;
	}

	spin_lock_init(&dev->reg_lock);
	mutex_init(&dev->dev_mutex);
	init_completion(&dev->cmnd_ready);
	init_completion(&dev->control_exit);
	init_completion(&dev->polling_exit);
	init_completion(&dev->notify);
	init_completion(&dev->scanning_done);
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	dev_info(&pci->dev, "Resource length: 0x%x\n",
		 (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
	if (!dev->remap_addr) {
		dev_err(&pci->dev, "ioremap error\n");
		err = -ENXIO;
		goto ioremap_fail;
	}

	/*
	 * Using "unsigned long" cast here to eliminate gcc warning in
	 * 64-bit system
	 */
	dev_info(&pci->dev, "Original address: 0x%lx, remapped address: 0x%lx\n",
		 (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
			&dev->rtsx_resv_buf_addr, GFP_KERNEL);
	if (!dev->rtsx_resv_buf) {
		dev_err(&pci->dev, "alloc dma buffer fail\n");
		err = -ENXIO;
		goto dma_alloc_fail;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
				      HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;

	rtsx_init_options(dev->chip);

	dev_info(&pci->dev, "pci->irq = %d\n", pci->irq);

	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto irq_acquire_fail;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	rtsx_init_chip(dev->chip);

	/*
	 * set the supported max_lun and max_id for the scsi host
	 * NOTE: the minimal value of max_id is 1
	 */
	host->max_id = 1;
	host->max_lun = dev->chip->max_lun;

	/* Start up our control thread */
	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto control_thread_fail;
	}
	dev->ctl_thread = th;

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		dev_err(&pci->dev, "Unable to add the scsi host\n");
		goto scsi_add_host_fail;
	}

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_run(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
		complete(&dev->scanning_done);
		err = PTR_ERR(th);
		goto scan_thread_fail;
	}

	/* Start up the thread for polling thread */
	th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-polling thread\n");
		err = PTR_ERR(th);
		goto scan_thread_fail;
	}
	dev->polling_thread = th;

	pci_set_drvdata(pci, dev);

	return 0;

	/* We come here if there are any problems */
scan_thread_fail:
	quiesce_and_remove_host(dev);
scsi_add_host_fail:
	complete(&dev->cmnd_ready);
	wait_for_completion(&dev->control_exit);
control_thread_fail:
	free_irq(dev->irq, (void *)dev);
	rtsx_release_chip(dev->chip);
irq_acquire_fail:
	dev->chip->host_cmds_ptr = NULL;
	dev->chip->host_sg_tbl_ptr = NULL;
	if (dev->chip->msi_en)
		pci_disable_msi(dev->pci);
dma_alloc_fail:
	iounmap(dev->remap_addr);
ioremap_fail:
	kfree(dev->chip);
chip_alloc_fail:
	dev_err(&pci->dev, "%s failed\n", __func__);

	return err;
}
Example #6
static int
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

	/* fall back to MSI */
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
	/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
	/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name,
				  udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	return err;
}
Example #7
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
Example #8
static int c_can_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
	struct c_can_priv *priv;
	struct net_device *dev;
	void __iomem *addr;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_device FAILED\n");
		goto out;
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev, "pci_request_regions FAILED\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_enable_msi(pdev);

	addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!addr) {
		dev_err(&pdev->dev,
			"device has no PCI memory resources, "
			"failing adapter\n");
		ret = -ENOMEM;
		goto out_release_regions;
	}

	/* allocate the c_can device */
	dev = alloc_c_can_dev();
	if (!dev) {
		ret = -ENOMEM;
		goto out_iounmap;
	}

	priv = netdev_priv(dev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;
	priv->base = addr;

	if (!c_can_pci_data->freq) {
		dev_err(&pdev->dev, "no clock frequency defined\n");
		ret = -ENODEV;
		goto out_free_c_can;
	} else {
		priv->can.clock.freq = c_can_pci_data->freq;
	}

	/* Configure CAN type */
	switch (c_can_pci_data->type) {
	case BOSCH_C_CAN:
		priv->regs = reg_map_c_can;
		break;
	case BOSCH_D_CAN:
		priv->regs = reg_map_d_can;
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}

	/* Configure access to registers */
	switch (c_can_pci_data->reg_align) {
	case C_CAN_REG_ALIGN_32:
		priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
		break;
	case C_CAN_REG_ALIGN_16:
		priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}

	ret = register_c_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto out_free_c_can;
	}

	dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
		 KBUILD_MODNAME, priv->regs, dev->irq);

	return 0;

out_free_c_can:
	free_c_can_dev(dev);
out_iounmap:
	pci_iounmap(pdev, addr);
out_release_regions:
	pci_disable_msi(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
Example #9
static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct pvscsi_adapter adapter_temp;
	struct Scsi_Host *host = NULL;
	unsigned int i;
	unsigned long flags = 0;
	int error;
	u32 max_id;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	/*
	 * Let's use a temp pvscsi_adapter struct until we find the number of
	 * targets on the adapter, after that we will switch to the real
	 * allocated struct.
	 */
	adapter = &adapter_temp;
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_disable_device;
	}

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources_and_disable;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources_and_disable;
	}

	pci_set_master(pdev);

	/*
	 * Ask the device for max number of targets before deciding the
	 * default pvscsi_ring_pages value.
	 */
	max_id = pvscsi_get_max_targets(adapter);
	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);

	if (pvscsi_ring_pages == 0)
		/*
		 * Set the right default value. Up to 16 it is 8, above it is
		 * max.
		 */
		pvscsi_ring_pages = (max_id > 16) ?
			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
	printk(KERN_INFO
	       "vmw_pvscsi: setting ring_pages to %d\n",
	       pvscsi_ring_pages);

	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_release_resources_and_disable;
	}

	/*
	 * Let's use the real pvscsi_adapter struct here onwards.
	 */
	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;
	/*
	 * Copy back what we already have to the allocated adapter struct.
	 */
	adapter->rev = adapter_temp.rev;
	adapter->mmioBase = adapter_temp.mmioBase;

	spin_lock_init(&adapter->hw_lock);
	host->max_channel = 0;
	host->max_lun     = 1;
	host->max_cmd_len = 16;
	host->max_id      = max_id;

	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
	       adapter->use_req_threshold ? "en" : "dis");

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_release_resources(adapter);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return error;

out_release_resources_and_disable:
	pvscsi_release_resources(adapter);
	goto out_disable_device;
}
Example #10
static int
probe
(
    struct pci_dev             *pci_device,
    const struct pci_device_id *id
)
{
    printk("Found firedancer test device (yeah!)\n");

    pdev = pci_device;
    printk("IRQ pdev pointer = %p\n", pdev);

    if( 0 != pci_enable_device(pci_device) )
    {
        printk(KERN_ERR "Unable to enable device!\n");
        return(-ENODEV);
    }

    if( !pci_intx_mask_supported(pci_device) )
    {
        printk(KERN_ERR "INTx interrupt masking not supported!\n");
        pci_disable_device(pci_device);
        return(-ENODEV);
    }

#ifdef MSI_TEST
    if( pci_enable_msi(pci_device) )
    {
        printk(KERN_ERR "Unable to switch to MSI interrupt!\n");
        pci_disable_device(pci_device);
        return(-ENODEV);
    }
#endif

    if
    (
        0 != request_irq
        (
            pci_device->irq,
            irq_handler,
            IRQF_SHARED,
            DRIVER_NAME,
            (void *)(irq_handler)
        )
    )
    {
        printk(KERN_ERR "Unable to request interrupt!\n");
#ifdef MSI_TEST
        pci_disable_msi(pci_device);
#endif
        pci_disable_device(pci_device);
        return(-EBUSY);
    }

    if( 0 != pci_request_region(pci_device, 0, DRIVER_NAME) )
    {
        printk(KERN_ERR "Unable to request BAR!\n");
        free_irq(pci_device->irq, (void *)(irq_handler));
#ifdef MSI_TEST
        pci_disable_msi(pci_device);
#endif
        pci_disable_device(pci_device);
        return(-EBUSY);
    }
    map = pci_iomap(pci_device, 0, 4096);
    if( !map )
    {
        printk(KERN_ERR "Unable to map BAR 0!\n");
        pci_release_region(pci_device, 0);
        free_irq(pci_device->irq, (void *)(irq_handler));
#ifdef MSI_TEST
        pci_disable_msi(pci_device);
#endif
        pci_disable_device(pci_device);
        return(-ENOMEM);
    }

    do_gettimeofday(&start_time);
    iowrite8(1, map);

    printk("Firedancer test device configured!\n");

    return(0);
}
Example #11
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = ARRAY_SIZE(enic->rq);
	unsigned int m = ARRAY_SIZE(enic->wq);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
Example #12
/**
 * stmmac_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device id/id's.
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by other driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe functions returns zero when the driver choose
 * to take "ownership" of the device or an error code(-ve no) otherwise.
 */
static int stmmac_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int i;
	int ret;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	/* Get the base address of device */
	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
		if (ret)
			return ret;
		break;
	}

	pci_set_master(pdev);

	if (info) {
		info->pdev = pdev;
		if (info->setup) {
			ret = info->setup(plat, info);
			if (ret)
				return ret;
		}
	} else
		stmmac_default_data(plat);

	pci_enable_msi(pdev);

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[i];
	res.wol_irq = pdev->irq;
	res.irq = pdev->irq;

	return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
Example #13
//------------------------------------------------------------------------------
static INT initOnePciDev(struct pci_dev* pPciDev_p,
                         const struct pci_device_id* pId_p)
{
    INT         result = 0;
    UINT8       barCount = 0;
    tBarInfo*   pBarInfo = NULL;

    UNUSED_PARAMETER(pId_p);

    if (pcieDrvInstance_l.pPciDev != NULL)
    {
        // This driver is already connected to a PCIe device
        DEBUG_LVL_DRVINTF_TRACE("%s device %s discarded\n",
                                __FUNCTION__, pci_name(pPciDev_p));
        result = -ENODEV;
        goto Exit;
    }

    pcieDrvInstance_l.pPciDev = pPciDev_p;

    // Enable the PCIe device
    DEBUG_LVL_DRVINTF_TRACE("%s enable device\n", __FUNCTION__);
    result = pci_enable_device(pPciDev_p);
    if (result != 0)
    {
        goto Exit;
    }

    DEBUG_LVL_DRVINTF_TRACE("%s request PCIe regions\n", __FUNCTION__);
    result = pci_request_regions(pPciDev_p, PLK_DRV_NAME);
    if (result != 0)
    {
        goto ExitFail;
    }

    // Ignoring whether or not any BAR is accessible
    for (barCount = 0; barCount < OPLK_MAX_BAR_COUNT; barCount++)
    {
        pBarInfo = &pcieDrvInstance_l.aBarInfo[barCount];

        if (pBarInfo->virtualAddr != (ULONG)NULL)
        {
            // The instance is already present
            result = -EIO;
            goto ExitFail;
        }

        // Look for the MMIO BARs
        if ((pci_resource_flags(pPciDev_p, barCount) & IORESOURCE_MEM) == 0)
        {
            continue;
        }

        // get the size of this field
        pBarInfo->length = pci_resource_len(pPciDev_p, barCount);

        // $$: Add check for weird broken IO regions

        pBarInfo->virtualAddr = (ULONG)ioremap_nocache(pci_resource_start(pPciDev_p,
                                                       barCount),
                                                       pBarInfo->length);
        if (pBarInfo->virtualAddr == (ULONG)NULL)
        {
            // Remap of controller's register space failed
            result = -EIO;
            goto ExitFail;
        }

        pBarInfo->busAddr = (ULONG)pci_resource_start(pPciDev_p, barCount);

        DEBUG_LVL_DRVINTF_TRACE("%s() --> ioremap\n", __FUNCTION__);
        DEBUG_LVL_DRVINTF_TRACE("\tbar#\t%u\n", barCount);
        DEBUG_LVL_DRVINTF_TRACE("\tbarLen\t%lu\n", pBarInfo->length);
        DEBUG_LVL_DRVINTF_TRACE("\tbarMap\t0x%lX\n", pBarInfo->virtualAddr);
        DEBUG_LVL_DRVINTF_TRACE("\tbarPhy\t0x%lX\n", pBarInfo->busAddr);
    }

    // Enable PCI busmaster
    DEBUG_LVL_DRVINTF_TRACE("%s enable busmaster\n", __FUNCTION__);
    pci_set_master(pPciDev_p);

    // Enable msi
    DEBUG_LVL_DRVINTF_TRACE("Enable MSI\n");
    result = pci_enable_msi(pPciDev_p);
    if (result != 0)
    {
        DEBUG_LVL_DRVINTF_TRACE("%s Could not enable MSI\n", __FUNCTION__);
    }

    // Install interrupt handler
    DEBUG_LVL_DRVINTF_TRACE("%s install interrupt handler\n", __FUNCTION__);
    result = request_irq(pPciDev_p->irq,
                         pcieDrvIrqHandler,
                         IRQF_SHARED,
                         PLK_DRV_NAME, /* pPciDev_p->dev.name */
                         pPciDev_p);
    if (result != 0)
    {
        goto ExitFail;
    }

    goto Exit;

ExitFail:
    removeOnePciDev(pPciDev_p);

Exit:
    DEBUG_LVL_DRVINTF_TRACE("%s finished with %d\n", __FUNCTION__, result);
    return result;
}
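Example #14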
static int __devinit nf10_probe(struct pci_dev *pdev, const struct pci_device_id *id){
	int err;
    int i;
    int ret = -ENODEV;
    struct nf10_card *card;

    // create private structure
	card = (struct nf10_card*)kmalloc(sizeof(struct nf10_card), GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "nf10: Private card memory alloc failed\n");
		ret = -ENOMEM;
		goto err_out_none;
	}
	memset(card, 0, sizeof(struct nf10_card));

	card->card_id = (int)atomic64_read(&detected_cards);
	memcpy(card->card_name,"nf10 ",sizeof(card->card_name));
	card->card_name[4] = 'a' + (char)card->card_id;

	spin_lock_init(&card->tx_lock);
	spin_lock_init(&card->axi_lock);
    card->pdev = pdev;

	// enable device
	if((err = pci_enable_device(pdev))) {
		printk(KERN_ERR "nf10: Unable to enable the PCI device!\n");
        ret = -ENODEV;
		goto err_out_free_card;
	}

    // set DMA addressing masks (full 64bit)
    if(dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) < 0){
        printk(KERN_ERR "nf10: dma_set_mask fail!\n");
        ret = -EFAULT;
        goto err_out_disable_device;
    }

    // enable BusMaster (enables generation of pcie requests)
	pci_set_master(pdev);

    // enable MSI
    if(pci_enable_msi(pdev) != 0){
        printk(KERN_ERR "nf10: failed to enable MSI interrupts\n");
        ret = -EFAULT;
		goto err_out_clear_master;
    }
	
    // be nice and tell kernel that we'll use this resource
	printk(KERN_INFO "nf10: Reserving memory region for NF10\n");
	if (!request_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0), card->card_name)) {
		printk(KERN_ERR "nf10: Reserving memory region failed\n");
        ret = -ENOMEM;
        goto err_out_msi;
	}
	if (!request_mem_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2), card->card_name)) {
		printk(KERN_ERR "nf10: Reserving memory region failed\n");
        ret = -ENOMEM;
		goto err_out_release_mem_region1;
	}

    // map the cfg memory
	printk(KERN_INFO "nf10: mapping cfg memory\n");
    card->cfg_addr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!card->cfg_addr)
	{
		printk(KERN_ERR "nf10: cannot map cfg region len:%lx start:%lx\n",
			(long unsigned)pci_resource_len(pdev, 0),
			(long unsigned)pci_resource_start(pdev, 0));
		ret = -EIO;
		goto err_out_iounmap;
	}

	printk(KERN_INFO "nf10: mapping mem memory\n");

    card->tx_dsc = ioremap_nocache(pci_resource_start(pdev, 2) + 0 * 0x00100000ULL, 0x00100000ULL);
    card->rx_dsc = ioremap_nocache(pci_resource_start(pdev, 2) + 1 * 0x00100000ULL, 0x00100000ULL);

	if (!card->tx_dsc || !card->rx_dsc)
	{
		printk(KERN_ERR "nf10: cannot map mem region len:%lx start:%lx\n",
			(long unsigned)pci_resource_len(pdev, 2),
			(long unsigned)pci_resource_start(pdev, 2));
		ret = -EIO;
		goto err_out_iounmap;
	}

    // reset
    *(((uint64_t*)card->cfg_addr)+30) = 1;
    mmiowb();
    msleep(1);

    // set buffer masks
    card->tx_dsc_mask = 0x000007ffULL;
    card->rx_dsc_mask = 0x000007ffULL;
    card->tx_pkt_mask = 0x00007fffULL;
    card->rx_pkt_mask = 0x00007fffULL;
    card->tx_dne_mask = 0x000007ffULL;
    card->rx_dne_mask = 0x000007ffULL;
    
    if(card->tx_dsc_mask > card->tx_dne_mask){
        *(((uint64_t*)card->cfg_addr)+1) = card->tx_dne_mask;
        card->tx_dsc_mask = card->tx_dne_mask;
    }
    else if(card->tx_dne_mask > card->tx_dsc_mask){
        *(((uint64_t*)card->cfg_addr)+7) = card->tx_dsc_mask;
        card->tx_dne_mask = card->tx_dsc_mask;
    }

    if(card->rx_dsc_mask > card->rx_dne_mask){
        *(((uint64_t*)card->cfg_addr)+9) = card->rx_dne_mask;
        card->rx_dsc_mask = card->rx_dne_mask;
    }
    else if(card->rx_dne_mask > card->rx_dsc_mask){
        *(((uint64_t*)card->cfg_addr)+15) = card->rx_dsc_mask;
        card->rx_dne_mask = card->rx_dsc_mask;
    }

    // allocate buffers to play with
    card->host_tx_dne_ptr = dma_alloc_coherent(&pdev->dev, card->tx_dne_mask+1, &(card->host_tx_dne_dma), GFP_KERNEL);
    card->host_rx_dne_ptr = dma_alloc_coherent(&pdev->dev, card->rx_dne_mask+1, &(card->host_rx_dne_dma), GFP_KERNEL);

    if( (card->host_rx_dne_ptr == NULL) ||
        (card->host_tx_dne_ptr == NULL) ){

        printk(KERN_ERR "nf10: cannot allocate dma buffer\n");
        ret = -ENOMEM;
        goto err_out_free_private2;
    }

    // set host buffer addresses
    *(((uint64_t*)card->cfg_addr)+16) = card->host_tx_dne_dma;
    *(((uint64_t*)card->cfg_addr)+17) = card->tx_dne_mask;
    *(((uint64_t*)card->cfg_addr)+18) = card->host_rx_dne_dma;
    *(((uint64_t*)card->cfg_addr)+19) = card->rx_dne_mask;

    mmiowb();

    // init mem buffers
    card->mem_tx_dsc.wr_ptr = 0;
    card->mem_tx_dsc.rd_ptr = 0;
    atomic64_set(&card->mem_tx_dsc.cnt, 0);
    card->mem_tx_dsc.mask = card->tx_dsc_mask;
    card->mem_tx_dsc.cl_size = (card->tx_dsc_mask+1)/64;
    card->mem_tx_pkt.wr_ptr = 0;
    card->mem_tx_pkt.rd_ptr = 0;
    atomic64_set(&card->mem_tx_pkt.cnt, 0);
    card->mem_tx_pkt.mask = card->tx_pkt_mask;
    card->mem_tx_pkt.cl_size = (card->tx_pkt_mask+1)/64;
    card->mem_rx_dsc.wr_ptr = 0;
    card->mem_rx_dsc.rd_ptr = 0;
    atomic64_set(&card->mem_rx_dsc.cnt, 0);
    card->mem_rx_dsc.mask = card->rx_dsc_mask;
    card->mem_rx_dsc.cl_size = (card->rx_dsc_mask+1)/64;
    card->mem_rx_pkt.wr_ptr = 0;
    card->mem_rx_pkt.rd_ptr = 0;
    atomic64_set(&card->mem_rx_pkt.cnt, 0);
    card->mem_rx_pkt.mask = card->rx_pkt_mask;
    card->mem_rx_pkt.cl_size = (card->rx_pkt_mask+1)/64;
    card->host_tx_dne.wr_ptr = 0;
    card->host_tx_dne.rd_ptr = 0;
    atomic64_set(&card->host_tx_dne.cnt, 0);
    card->host_tx_dne.mask = card->tx_dne_mask;
    card->host_tx_dne.cl_size = (card->tx_dne_mask+1)/64;
    card->host_rx_dne.wr_ptr = 0;
    card->host_rx_dne.rd_ptr = 0;
    atomic64_set(&card->host_rx_dne.cnt, 0);
    card->host_rx_dne.mask = card->rx_dne_mask;
    card->host_rx_dne.cl_size = (card->rx_dne_mask+1)/64;
    
    for(i = 0; i < card->host_tx_dne.cl_size; i++)
        *(((uint32_t*)card->host_tx_dne_ptr) + i * 16) = 0xffffffff;

    for(i = 0; i < card->host_rx_dne.cl_size; i++)
        *(((uint64_t*)card->host_rx_dne_ptr) + i * 8 + 7) = 0xffffffffffffffffULL;

    // allocate book keeping structures
    card->tx_bk_skb = (struct sk_buff**)kmalloc(card->mem_tx_dsc.cl_size*sizeof(struct sk_buff*), GFP_KERNEL);
    card->tx_bk_dma_addr = (uint64_t*)kmalloc(card->mem_tx_dsc.cl_size*sizeof(uint64_t), GFP_KERNEL);
    card->tx_bk_size = (uint64_t*)kmalloc(card->mem_tx_dsc.cl_size*sizeof(uint64_t), GFP_KERNEL);
    card->tx_bk_port = (uint64_t*)kmalloc(card->mem_tx_dsc.cl_size*sizeof(uint64_t), GFP_KERNEL);

    card->rx_bk_skb = (struct sk_buff**)kmalloc(card->mem_rx_dsc.cl_size*sizeof(struct sk_buff*), GFP_KERNEL);
    card->rx_bk_dma_addr = (uint64_t*)kmalloc(card->mem_rx_dsc.cl_size*sizeof(uint64_t), GFP_KERNEL);
    card->rx_bk_size = (uint64_t*)kmalloc(card->mem_rx_dsc.cl_size*sizeof(uint64_t), GFP_KERNEL);
    card->rx_bk_id = (uint16_t*)kmalloc(card->mem_rx_dsc.cl_size*sizeof(uint16_t), GFP_KERNEL);

    // check the allocations before rx_bk_id is dereferenced below
    if(card->tx_bk_skb == NULL || card->tx_bk_dma_addr == NULL || card->tx_bk_size == NULL || card->tx_bk_port == NULL ||
       card->rx_bk_skb == NULL || card->rx_bk_dma_addr == NULL || card->rx_bk_size == NULL || card->rx_bk_id == NULL) {
        printk(KERN_ERR "nf10: kmalloc failed\n");
        ret = -ENOMEM;
        goto err_out_free_private2;
    }

    for (i = 0; i < card->mem_rx_dsc.cl_size; i++)
    	card->rx_bk_id[i] = 0xffff;
    card->rx_id = 0;

    // store private data to pdev
	pci_set_drvdata(pdev, card);

	axi_wait_write_buffer_empty(card); // initialize axi_write_buffer_level by waiting for an empty axi write buffer
	atomic64_set(&card->axi_access_state, AXI_ACCESS_UNASSIGNED);

    if (!nf10_ael2005_phy_configuration(card)) {    // Read from the AEL2005 PHY chips
        printk(KERN_INFO "nf10: AEL2005 PHY chips are configured\n");
    }
    else {
        printk(KERN_INFO "nf10: AEL2005 PHY chips were already configured\n");
    }

    // success
    ret = nf10iface_probe(pdev, card);
    if(ret < 0){
        printk(KERN_ERR "nf10: failed to initialize interfaces\n");
        goto err_out_free_private2;
    }

    ret = nf10fops_probe(pdev, card);
    if(ret < 0){
        printk(KERN_ERR "nf10: failed to initialize dev file\n");
        goto err_out_free_private2;
    }
    else{
        printk(KERN_INFO "nf10: device ready\n");
        atomic64_inc(&detected_cards);
        return ret;
    }

 // error out
 err_out_free_private2:
    if(card->tx_bk_dma_addr) kfree(card->tx_bk_dma_addr);
    if(card->tx_bk_skb) kfree(card->tx_bk_skb);
    if(card->tx_bk_size) kfree(card->tx_bk_size);
    if(card->tx_bk_port) kfree(card->tx_bk_port);
    if(card->rx_bk_dma_addr) kfree(card->rx_bk_dma_addr);
    if(card->rx_bk_skb) kfree(card->rx_bk_skb);
    if(card->rx_bk_size) kfree(card->rx_bk_size);
    if(card->rx_bk_id) kfree(card->rx_bk_id);
    dma_free_coherent(&pdev->dev, card->tx_dne_mask+1, card->host_tx_dne_ptr, card->host_tx_dne_dma);
    dma_free_coherent(&pdev->dev, card->rx_dne_mask+1, card->host_rx_dne_ptr, card->host_rx_dne_dma);
 err_out_iounmap:
    if(card->tx_dsc) iounmap(card->tx_dsc);
    if(card->rx_dsc) iounmap(card->rx_dsc);
    if(card->cfg_addr)   iounmap(card->cfg_addr);
	pci_set_drvdata(pdev, NULL);
	release_mem_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
 err_out_release_mem_region1:
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
 err_out_msi:
    pci_disable_msi(pdev);
 err_out_clear_master:
    pci_clear_master(pdev);
 err_out_disable_device:
	pci_disable_device(pdev);
 err_out_free_card:
	kfree(card);
 err_out_none:
	return ret;
}
Example #15
int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char * name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	
	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	  !aac_rx_restart_adapter(dev, 0))
		
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
	
	/* Check to see if the board panic'd while booting. */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	
	/* Check to see if the board failed any self tests. */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	
	/* Check to see if the monitor panic'd while booting. */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	
	/* Wait for the adapter to be up and running. Wait up to 3 minutes. */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
	{
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	
	/* Fill in the common function dispatch table. */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;

	
	/* First clear out all interrupts, then enable the ones we can handle. */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	aac_adapter_enable_int(dev);
	
	/* Tell the adapter that all is configured and it can start accepting requests. */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}
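Example #16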
/**
 * This function is called by the PCI core when it has a struct pci_dev that it 
 * thinks the driver wants to control. It will allocate the memory for the struct
 * alt_up_pci_dev, initialize it correctly and dynamically allocate a character
 * device node.
 *
 * @param[in] dev The pointer to the pci device that evokes the probe function.
 * @param[in] id  The pci_id_table of the driver.
 * 
 * @return Return 0 on success.
 */
static int  __devinit alt_up_pci_probe (struct pci_dev *dev, const struct pci_device_id *id) {

	int i, retval = 0;

	// allocate the memory for the struct alt_up_pci_dev
	struct alt_up_pci_dev *mydev = kmalloc( sizeof(struct alt_up_pci_dev), GFP_KERNEL );
	if (mydev == NULL){
		printk(KERN_DEBUG "kmalloc() memory for struct alt_up_pci_dev failed. \n");
		retval = -ENOMEM;
		goto err_alloc_dev;
	}
	
	// save the pointers for the future usage
	pci_set_drvdata(dev, (void *)mydev);
	mydev->pci_dev = dev;
	
	// wake up the device             
	retval = pci_enable_device(dev);
	if (retval) {
		printk(KERN_DEBUG "pci_enable_device() failed. \n");
		goto err_enable_device;
	}

	// enables bus-mastering for device dev       
	pci_set_master(dev);
	
	// reserved PCI I/O and memory resources
	retval = pci_request_regions(dev, DRV_NAME);
	if (retval) {
		printk(KERN_DEBUG "pci_request_regions() failed. \n");
		goto err_request_regions;
	}
			
	// set the DMA addressing limitation
	retval = pci_set_dma_mask(dev, DMA_BIT_MASK( pci_dma_bit_range ));
	if (retval) {
		printk(KERN_DEBUG "pci_set_dma_mask() failed. \n");
		goto err_set_dma_mask;      
	}

	retval = pci_set_consistent_dma_mask(dev,DMA_BIT_MASK( pci_dma_bit_range ));
	if(retval) {
		printk(KERN_DEBUG "pci_set_consistent_dma_mask() failed. \n");
		goto err_set_dma_mask;
	}

	// set __iomem address, accessed by ioread, iowrite
	for (i = 0; i < MAX_NUM_OF_BARS; i ++) {
		if ( pci_resource_end(dev, i) != pci_resource_start(dev, i) ){

			/* create a virtual mapping cookie for a PCI BAR, 
			 * second arg is BAR, third is maxlen (0 means complete BAR) */
			mydev->bar[i] = pci_iomap(dev, i, 0); 
			if( !mydev->bar[i] ){
				printk(KERN_DEBUG "pci_iomap() failed. \n");
				retval = -ENOMEM;
				goto err_iomap;
			}
			
			printk(KERN_DEBUG DRV_NAME " BAR%d initialized.\n", i);
			mydev->bar_size[i] = pci_resource_end(dev, i) - pci_resource_start(dev, i) + 1;
			
		} else  mydev->bar[i] = NULL;
	}

	// initialize the alt_up_pci_dev struct
	retval = alt_up_pci_dev_init(mydev);
	if(retval) {
		printk(KERN_DEBUG "alt_up_pci_dev_init() failed. \n");
		goto err_dev_init;
	}
	
	// have MSI enabled on its device function    
	retval = pci_enable_msi(dev);
	if (retval) {
		printk(KERN_DEBUG "pci_enable_msi() failed. \n");
		goto err_enable_msi;        
	}
			
	// request irq line for interrupt
	mydev->irq_line = dev->irq;
	retval = request_irq((int)mydev->irq_line, (void*)alt_up_pci_irqhandler, IRQF_SHARED, DRV_NAME, (void *)mydev);
	if (retval) {
		printk(KERN_DEBUG "pci_request_irq() failed. \n");
		goto err_request_irq;
	}

	// write irq_line to the PCI configuration space
	retval = pci_write_config_byte(dev, PCI_INTERRUPT_LINE, mydev->irq_line);
	if (retval) {
		printk(KERN_DEBUG "pci_read_config() failed. \n");
		goto err_write_config;       
	}  

	/* dynamically allocate a character device node
	 * 0 : requested minor
	 * 1 : count 
	 */
	retval = alloc_chrdev_region(&mydev->cdev_no, 0, 1, DRV_NAME);
	if(retval) {
		printk(KERN_DEBUG "alloc_chrdev_region() failed. \n");
		goto err_alloc_chrdev;
	}

	// init the cdev
	cdev_init(&mydev->cdev, &alt_up_pci_fops);
	mydev->cdev.owner = THIS_MODULE;
	mydev->cdev.ops = &alt_up_pci_fops;
	
	// add the cdev to kernel, from now on, the driver is alive
	retval = cdev_add(&mydev->cdev, mydev->cdev_no, 1);   /* 1: count */
	if(retval) {
		printk(KERN_DEBUG "cdev_add() failed. \n");
		goto err_cdev_add;
	}
	
	return 0;
	
	
	//cdev_del(&mydev->cdev);
err_cdev_add:
	unregister_chrdev_region(mydev->cdev_no, 1);  
err_alloc_chrdev:

err_write_config:
	free_irq(mydev->irq_line, (void *)mydev);
err_request_irq:
	pci_disable_msi(dev);
err_enable_msi:
	alt_up_pci_dev_exit(mydev);
err_dev_init:
	for (i = 0; i < MAX_NUM_OF_BARS; i ++) {
		if( mydev->bar[i] != NULL )
			pci_iounmap(dev, mydev->bar[i]);
	}
	goto err_set_dma_mask;
err_iomap:     
	for ( i = i - 1; i >= 0; i --){
		if( mydev->bar[i] != NULL)
			pci_iounmap(dev, mydev->bar[i]);
	}
err_set_dma_mask:
	pci_release_regions(dev);
err_request_regions:
	pci_disable_device(dev);
err_enable_device:
	kfree(mydev);
err_alloc_dev:
	printk("alt_up_pci_probe() failed with error: %d \n ", retval);

	return retval;        
}
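Example #17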
int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char * name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	  !aac_rx_restart_adapter(dev, 0))
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
	{
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->sync_mode = 0;	/* sync. mode not supported */
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = dev->scsi_host_ptr->base;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;

	aac_adapter_enable_int(dev);
	/*
	 *	Tell the adapter that all is configured, and it can
	 * start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}
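The aacraid path above enables MSI opportunistically, records the outcome in dev->msi, and undoes the enable if request_irq() fails, so the legacy INTx line remains usable. A minimal sketch of that pattern, using a hypothetical foo_dev structure and foo_isr handler that are not part of the driver shown:

#include <linux/pci.h>
#include <linux/interrupt.h>

struct foo_dev {
	struct pci_dev *pdev;
	bool msi;
};

static irqreturn_t foo_isr(int irq, void *data)
{
	/* A real handler would acknowledge the hardware here. */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	/* Try MSI first; keep working on the shared INTx line otherwise. */
	foo->msi = !pci_enable_msi(foo->pdev);

	if (request_irq(foo->pdev->irq, foo_isr,
			foo->msi ? 0 : IRQF_SHARED, "foo", foo)) {
		if (foo->msi)
			pci_disable_msi(foo->pdev);
		return -EBUSY;
	}
	return 0;
}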
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		e_err(probe,
		      "ATR is not supported while multiple "
		      "queues are disabled.  Disabling Flow Director\n");
	}
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->atr_sample_rate = 0;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}
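The ixgbe helper above walks a fallback ladder: budget MSI-X vectors from the queue and CPU counts, drop to single MSI if MSI-X cannot be had, and finally accept legacy INTx, clearing the feature flags that depend on multiple vectors along the way. A compact, hypothetical version of the same ladder; the bar_* names and BAR_FLAG_* bits are illustrative, and it calls pci_enable_msix_range() directly rather than the driver's own acquire helper:

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#define BAR_FLAG_MSIX	BIT(0)
#define BAR_FLAG_MSI	BIT(1)

struct bar_adapter {
	struct pci_dev *pdev;
	struct msix_entry *msix_entries;
	unsigned int flags;
};

/* Returns the number of vectors obtained (>= 1). */
static int bar_set_interrupt_capability(struct bar_adapter *adapter, int want)
{
	int i, got;

	/* A failed entry allocation is not fatal, it only rules out MSI-X. */
	adapter->msix_entries = kcalloc(want, sizeof(*adapter->msix_entries),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < want; i++)
			adapter->msix_entries[i].entry = i;

		got = pci_enable_msix_range(adapter->pdev,
					    adapter->msix_entries, 1, want);
		if (got > 0) {
			adapter->flags |= BAR_FLAG_MSIX;
			return got;
		}
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}

	if (!pci_enable_msi(adapter->pdev)) {
		adapter->flags |= BAR_FLAG_MSI;
		return 1;
	}

	/* Legacy INTx: one vector, no flag set. */
	return 1;
}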
static int nlm_nlm_common_generic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{

  int err;
  unsigned long base;
#ifdef CONFIG_NLM_COMMON
  static int x=0;
  uint32_t tmp;

  if(!x){
    /*Setting MaxOutSplitTrans to zero*/
    pci_read_config_dword(pdev,0x40,&tmp); 
    tmp = tmp & ~(0x7U<<20);
    pci_write_config_dword(pdev,0x40,tmp);
    pci_read_config_dword(pdev,0x40,&tmp); 
    x=1;
    return -1;
  }
#endif
  nlm_pdev = pdev;
#ifndef CONFIG_NLM_COMMON
  if((err = pci_enable_device(pdev)))
  {
    ErrorMsg("Cannot enable PCI device, aborting.");
    return err;
  }
#endif
  if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
  {
    ErrorMsg("Cannot find proper PCI device " 
	     	    "base address BAR0, aborting.\n");
    err = -ENODEV;
    pci_disable_device(pdev);
    return err;
  }
  err = pci_request_region(pdev, 0, NLM_DRIVER);
  if (err)
  {
    ErrorMsg("Cannot obtain PCI resources, aborting.");
    err = -ENODEV;
    pci_disable_device(pdev);
    return err;
  }
  pci_set_master(pdev);
#if !defined(CONFIG_NLM_COMMON) && defined(XLR_MSI_IS_SUPPORTED)
  if ((err = pci_find_capability(pdev, PCI_CAP_ID_MSI)))
  {
    Message("Device is MSI capable..Enabling MSI");
    err = pci_enable_msi(pdev);
    msi_irq = pdev->irq;
    if (err == 0) {
      Message("MSI Enabled");
    } else {
      ErrorMsg("MSI Enable failed");
      return err;
    }
  }
  else
  {
    ErrorMsg("Device is NOT MSI capable");
    err = -ENODEV;
    pci_disable_device(pdev);
    return err;
  }
#endif
  base = pci_resource_start(pdev, 0);
  nlm_nlm_common_shared_mem_base_host = (unsigned volatile int *)
			ioremap_nocache(base,pci_resource_len(pdev, 0));
  printk("Device Memory Available @ %#x \n",
			(uint32_t)(unsigned long)nlm_nlm_common_shared_mem_base_host);
  if(nlm_nlm_common_shared_mem_base_host == NULL)
  {
    err = -ENODEV;
#if !defined(CONFIG_NLM_COMMON) && defined(XLR_MSI_IS_SUPPORTED)
    pci_disable_msi(pdev);
#endif
#ifndef CONFIG_NLM_COMMON
    pci_disable_device(pdev);
#endif
    return err;
  }

#ifdef XLR_MAILBOX_IS_SUPPORTED
  /* Use BAR2 as the mailbox address */
  base = pci_resource_start(pdev, 2);
  nlm_nlm_common_mailbox_addr = (unsigned int *)ioremap(base,pci_resource_len(pdev, 2));

  if(nlm_nlm_common_mailbox_addr == NULL || base == 0)
  {
    ErrorMsg("MailBox Is Not Supported");
    err = -ENODEV;
    iounmap((void *)nlm_nlm_common_shared_mem_base_host);
    nlm_nlm_common_mailbox_addr = nlm_nlm_common_shared_mem_base_host = 0;

#if !defined(CONFIG_NLM_COMMON) && defined(XLR_MSI_IS_SUPPORTED)
    pci_disable_msi(pdev);
#endif
#ifndef CONFIG_NLM_COMMON
    pci_disable_device(pdev);
#endif
    return err;
  }
#endif 

#if !defined(CONFIG_NLM_COMMON) && defined(XLR_MSI_IS_SUPPORTED)
  if ((err = request_irq(msi_irq, nlm_nlm_common_generic_msi_handler,
                         IRQF_DISABLED,
                         "nlm_nlm_common_generic_msi_handler",
                         (void *)NULL))) {
    ErrorMsg("Can't register interrupt handler irq %d", msi_irq);
    iounmap((void *)nlm_nlm_common_shared_mem_base_host);
#ifdef XLR_MAILBOX_IS_SUPPORTED
    iounmap((void *)nlm_nlm_common_mailbox_addr);
#endif
    pci_disable_msi(pdev);
    pci_disable_device(pdev);
    return err;
  }
//pci_set_mwi(pdev); 
#endif

#ifdef CONFIG_SYSCTL
  nlm_pcix_sysctl_header = register_sysctl_table(nlm_pcix_sysctl_tbl);
  if (nlm_pcix_sysctl_header == NULL) {
    printk(KERN_WARNING "Could not register to sysctl\n");
  } else {
    printk("nlm_pcix: registered with sysctl\n");
  }
#endif

  return 0;
}
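This probe only attempts MSI after confirming the function actually exposes an MSI capability with pci_find_capability(), and treats a missing capability as fatal. A stripped-down sketch of that check; the qux_* name is a placeholder, not part of the driver above:

#include <linux/pci.h>

static int qux_enable_msi_strict(struct pci_dev *pdev)
{
	int err;

	if (!pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
		dev_err(&pdev->dev, "device is not MSI capable\n");
		return -ENODEV;
	}

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable MSI: %d\n", err);
		return err;
	}

	/* pdev->irq now holds the MSI vector to pass to request_irq(). */
	return 0;
}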
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct mei_device *dev;
    struct mei_txe_hw *hw;
    int err;
    int i;

    /* enable pci dev */
    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "failed to enable pci device.\n");
        goto end;
    }
    /* set PCI host mastering  */
    pci_set_master(pdev);
    /* pci request regions for mei driver */
    err = pci_request_regions(pdev, KBUILD_MODNAME);
    if (err) {
        dev_err(&pdev->dev, "failed to get pci regions.\n");
        goto disable_device;
    }

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
    if (err) {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev, "No suitable DMA available.\n");
            goto release_regions;
        }
    }

    /* allocates and initializes the mei dev structure */
    dev = mei_txe_dev_init(pdev);
    if (!dev) {
        err = -ENOMEM;
        goto release_regions;
    }
    hw = to_txe_hw(dev);


    err = mei_reserver_dma_acpi(dev);
    if (err)
        err = mei_alloc_dma(dev);
    if (err)
        goto free_device;

    /* mapping  IO device memory */
    for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
        hw->mem_addr[i] = pci_iomap(pdev, i, 0);
        if (!hw->mem_addr[i]) {
            dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
            err = -ENOMEM;
            goto free_device;
        }
    }


    pci_enable_msi(pdev);

    /* clear spurious interrupts */
    mei_clear_interrupts(dev);

    /* request and enable interrupt  */
    if (pci_dev_msi_enabled(pdev))
        err = request_threaded_irq(pdev->irq,
                                   NULL,
                                   mei_txe_irq_thread_handler,
                                   IRQF_ONESHOT, KBUILD_MODNAME, dev);
    else
        err = request_threaded_irq(pdev->irq,
                                   mei_txe_irq_quick_handler,
                                   mei_txe_irq_thread_handler,
                                   IRQF_SHARED, KBUILD_MODNAME, dev);
    if (err) {
        dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                pdev->irq);
        goto free_device;
    }

    if (mei_start(dev)) {
        dev_err(&pdev->dev, "init hw failure.\n");
        err = -ENODEV;
        goto release_irq;
    }

    err = mei_txe_setup_satt2(dev,
                              dma_to_phys(&dev->pdev->dev, hw->pool_paddr), hw->pool_size);
    if (err)
        goto release_irq;


    err = mei_register(dev);
    if (err)
        goto release_irq;

    pci_set_drvdata(pdev, dev);

    hw->mdev = mei_mm_init(&dev->pdev->dev,
                           hw->pool_vaddr, hw->pool_paddr, hw->pool_size);

    if (IS_ERR_OR_NULL(hw->mdev))
        goto deregister_mei;

    pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
    pm_runtime_use_autosuspend(&pdev->dev);

    pm_runtime_mark_last_busy(&pdev->dev);

    /*
     * For HW that is not wake-capable, the runtime PM framework
     * can't be used at the PCI device level.
     * Use domain runtime PM callbacks instead.
     */
    if (!pci_dev_run_wake(pdev))
        mei_txe_set_pm_domain(dev);

    pm_runtime_put_noidle(&pdev->dev);

    if (!nopg)
        pm_runtime_allow(&pdev->dev);

    return 0;

deregister_mei:
    mei_deregister(dev);
release_irq:

    mei_cancel_work(dev);

    /* disable interrupts */
    mei_disable_interrupts(dev);

    free_irq(pdev->irq, dev);
    pci_disable_msi(pdev);

free_device:
    if (hw->pool_release)
        hw->pool_release(hw);

    mei_txe_pci_iounmap(pdev, hw);

    kfree(dev);
release_regions:
    pci_release_regions(pdev);
disable_device:
    pci_disable_device(pdev);
end:
    dev_err(&pdev->dev, "initialization failed.\n");
    return err;
}
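The TXE probe enables MSI on a best-effort basis and then chooses its request_threaded_irq() arguments from pci_dev_msi_enabled(): an MSI vector is exclusive to the device, so the quick handler can be dropped and IRQF_ONESHOT used, while the INTx path keeps a primary handler and IRQF_SHARED. A hedged sketch of that selection with hypothetical baz_* handlers:

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Placeholder handlers; a real driver would poke its own registers. */
static irqreturn_t baz_quick_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t baz_thread_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int baz_request_irq(struct pci_dev *pdev, void *drvdata)
{
	pci_enable_msi(pdev);	/* best effort; INTx still works if it fails */

	if (pci_dev_msi_enabled(pdev))
		return request_threaded_irq(pdev->irq, NULL,
					    baz_thread_handler,
					    IRQF_ONESHOT, "baz", drvdata);

	return request_threaded_irq(pdev->irq, baz_quick_handler,
				    baz_thread_handler,
				    IRQF_SHARED, "baz", drvdata);
}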
Exemple #21
0
static int rtsx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	printk(KERN_INFO "--- %s ---\n", DRIVER_MAKE_TIME);

	err = pci_enable_device(pci);
	if (err < 0) {
		printk(KERN_ERR "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		printk(KERN_ERR "PCI request regions for %s failed!\n", CR_DRIVER_NAME);
		pci_disable_device(pci);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		printk(KERN_ERR "Unable to allocate the scsi host\n");
		pci_release_regions(pci);
		pci_disable_device(pci);
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = (struct rtsx_chip *)kmalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
	if (dev->chip == NULL) {
		goto errout;
	}
	memset(dev->chip, 0, sizeof(struct rtsx_chip));

	spin_lock_init(&dev->reg_lock);
	mutex_init(&(dev->dev_mutex));
	sema_init(&(dev->sema), 0);
	init_completion(&(dev->notify));
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	printk(KERN_INFO "Resource length: 0x%x\n", (unsigned int)pci_resource_len(pci,0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci,0));
	if (dev->remap_addr == NULL) {
		printk(KERN_ERR "ioremap error\n");
		err = -ENXIO;
		goto errout;
	}

	printk(KERN_INFO "Original address: 0x%lx, remapped address: 0x%lx\n", 
			(unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN, 
			&(dev->rtsx_resv_buf_addr), GFP_KERNEL);
	if (dev->rtsx_resv_buf == NULL) {
		printk(KERN_ERR "alloc dma buffer fail\n");
		err = -ENXIO;
		goto errout;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;
	
	rtsx_init_options(dev->chip);

	printk(KERN_INFO "pci->irq = %d\n", pci->irq);
	
	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0) {
			dev->chip->msi_en = 0;
		}
	}
	
	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto errout;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		printk(KERN_ERR "Unable to add the scsi host\n");
		goto errout;
	}

	rtsx_init_chip(dev->chip);
	
	
	th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the control thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);
	
	
	th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-scanning thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the scanning thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	
	th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-polling thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the polling thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	pci_set_drvdata(pci, dev);

	return 0;

	
errout:
	printk(KERN_ERR "rtsx_probe() failed\n");
	release_everything(dev);

	return err;
}
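Here MSI is an option carried in the chip structure (msi_en): the driver calls pci_enable_msi() only when the option is set and silently clears it on failure, so the later request_irq() and teardown code can key off the same flag. A small sketch of the idea with a made-up quux_chip; the teardown side would then only call pci_disable_msi() while the flag is still set:

#include <linux/pci.h>

struct quux_chip {
	int msi_en;	/* option controlled; cleared if MSI cannot be enabled */
};

static void quux_maybe_enable_msi(struct quux_chip *chip, struct pci_dev *pci)
{
	if (!chip->msi_en)
		return;

	if (pci_enable_msi(pci) < 0) {
		/* Fall back to INTx without failing the probe. */
		chip->msi_en = 0;
	}
}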
Exemple #22
0
/* Bus ops */
static int wil_if_pcie_enable(struct wil6210_priv *wil)
{
	struct pci_dev *pdev = wil->pdev;
	int rc;
	/* on platforms with buggy ACPI, pdev->msi_enabled may be set to
	 * allow pci_enable_device to work. This indicates INTx was not routed
	 * and only MSI should be used
	 */
	int msi_only = pdev->msi_enabled;

	wil_dbg_misc(wil, "%s()\n", __func__);

	pdev->msi_enabled = 0;

	pci_set_master(pdev);

	/*
	 * how many MSI interrupts to request?
	 */
	switch (use_msi) {
	case 3:
	case 1:
		wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
		break;
	case 0:
		wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
		break;
	default:
		wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
		use_msi = 1;
	}

	if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
		wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
		use_msi = 1;
	}

	if (use_msi == 1 && pci_enable_msi(pdev)) {
		wil_err(wil, "pci_enable_msi failed, use INTx\n");
		use_msi = 0;
	}

	wil->n_msi = use_msi;

	if ((wil->n_msi == 0) && msi_only) {
		wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
		rc = -ENODEV;
		goto stop_master;
	}

	rc = wil6210_init_irq(wil, pdev->irq);
	if (rc)
		goto stop_master;

	/* need reset here to obtain MAC */
	mutex_lock(&wil->mutex);
	rc = wil_reset(wil);
	mutex_unlock(&wil->mutex);
	if (debug_fw)
		rc = 0;
	if (rc)
		goto release_irq;

	return 0;

 release_irq:
	wil6210_fini_irq(wil, pdev->irq);
	/* safe to call if no MSI */
	pci_disable_msi(pdev);
 stop_master:
	pci_clear_master(pdev);
	return rc;
}
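wil6210 distinguishes 3-vector MSI, single MSI and INTx through the use_msi parameter, stepping down one level each time an allocation fails. A reduced sketch of that negotiation; corge_* is hypothetical, and on kernels that provide pci_enable_msi_range() an exact vector count is requested by passing the same minimum and maximum:

#include <linux/pci.h>

/* Returns the number of MSI vectors in use: 3, 1, or 0 (INTx). */
static int corge_setup_msi(struct pci_dev *pdev, int want)
{
	if (want == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
		dev_warn(&pdev->dev, "3 MSI vectors failed, trying 1\n");
		want = 1;
	}

	if (want == 1 && pci_enable_msi(pdev)) {
		dev_warn(&pdev->dev, "MSI failed, falling back to INTx\n");
		want = 0;
	}

	return want;
}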
Exemple #23
0
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;


	if (!mei_me_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = mei_register(dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
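Worth noting in the error path above is the unwind order: the IRQ is freed before MSI is disabled, the BAR is unmapped before the device structure is freed, and regions are released before the device is disabled, mirroring the setup order in reverse. A skeletal, hypothetical probe with the same shape (the grault_* names are placeholders):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t grault_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int grault_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "grault");
	if (err)
		goto disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		err = -ENOMEM;
		goto release_regions;
	}

	pci_enable_msi(pdev);		/* optional; INTx otherwise */

	err = request_irq(pdev->irq, grault_isr, IRQF_SHARED, "grault", regs);
	if (err)
		goto disable_msi;

	return 0;

disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, regs);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
	return err;
}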
Exemple #24
0
static int xilly_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct xilly_endpoint *endpoint;
	int rc = 0;

	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);

	if (!endpoint)
		return -ENOMEM;

	pci_set_drvdata(pdev, endpoint);

	rc = pcim_enable_device(pdev);

	if (rc) {
		dev_err(endpoint->dev,
			"pcim_enable_device() failed. Aborting.\n");
		return rc;
	}

	/* L0s has caused packet drops. No power saving, thank you. */

	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(endpoint->dev,
			"Incorrect BAR configuration. Aborting.\n");
		return -ENODEV;
	}

	rc = pcim_iomap_regions(pdev, 0x01, xillyname);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_iomap_regions() failed. Aborting.\n");
		return rc;
	}

	endpoint->registers = pcim_iomap_table(pdev)[0];

	pci_set_master(pdev);

	/* Set up a single MSI interrupt */
	if (pci_enable_msi(pdev)) {
		dev_err(endpoint->dev,
			"Failed to enable MSI interrupts. Aborting.\n");
		return -ENODEV;
	}
	rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
			      xillyname, endpoint);

	if (rc) {
		dev_err(endpoint->dev,
			"Failed to register MSI handler. Aborting.\n");
		return -ENODEV;
	}

	/*
	 * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
	 * is the right thing. But some unclever PCIe drivers report it's OK
	 * when the hardware drops those 64-bit PCIe packets. So trust
	 * nobody and use 32 bits DMA addressing in any case.
	 */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		endpoint->dma_using_dac = 0;
	else {
		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
		return -ENODEV;
	}

	rc = xillybus_endpoint_discovery(endpoint);

	if (!rc)
		return 0;

	xillybus_do_cleanup(&endpoint->cleanup, endpoint);

	return rc;
}
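Because this probe uses the managed pcim_enable_device()/pcim_iomap_regions() variants and devm_request_irq(), it can return early on any error without explicit cleanup; in the kernels these examples come from, the managed release path also tears down MSI for a pcim-enabled device, though a driver may still disable it explicitly in its remove path. A minimal managed-style sketch under those assumptions, with a hypothetical garply_* driver:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t garply_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int garply_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x01, "garply");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[0];
	pci_set_master(pdev);

	if (pci_enable_msi(pdev))
		return -ENODEV;

	/* devm-managed: the IRQ is freed automatically when the driver unbinds. */
	return devm_request_irq(&pdev->dev, pdev->irq, garply_isr, 0,
				"garply", regs);
}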
Exemple #25
0
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
    struct iwl_bus *bus;
    struct iwl_pci_bus *pci_bus;
    u16 pci_cmd;
    int err;

    bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
    if (!bus) {
        dev_printk(KERN_ERR, &pdev->dev,
                   "Couldn't allocate iwl_pci_bus");
        err = -ENOMEM;
        goto out_no_pci;
    }

    pci_bus = IWL_BUS_GET_PCI_BUS(bus);
    pci_bus->pci_dev = pdev;

    pci_set_drvdata(pdev, bus);

    /* W/A - seems to solve weird behavior. We need to remove this if we
     * don't want to stay in L1 all the time. This wastes a lot of power */
    pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
                           PCIE_LINK_STATE_CLKPM);

    if (pci_enable_device(pdev)) {
        err = -ENODEV;
        goto out_no_pci;
    }

    pci_set_master(pdev);

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
    if (!err)
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
    if (err) {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (!err)
            err = pci_set_consistent_dma_mask(pdev,
                                              DMA_BIT_MASK(32));
        /* both attempts failed: */
        if (err) {
            dev_printk(KERN_ERR, bus->dev,
                       "No suitable DMA available.\n");
            goto out_pci_disable_device;
        }
    }

    err = pci_request_regions(pdev, DRV_NAME);
    if (err) {
        dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
        goto out_pci_disable_device;
    }

    pci_bus->hw_base = pci_iomap(pdev, 0, 0);
    if (!pci_bus->hw_base) {
        dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
        err = -ENODEV;
        goto out_pci_release_regions;
    }

    dev_printk(KERN_INFO, &pdev->dev,
               "pci_resource_len = 0x%08llx\n",
               (unsigned long long) pci_resource_len(pdev, 0));
    dev_printk(KERN_INFO, &pdev->dev,
               "pci_resource_base = %p\n", pci_bus->hw_base);

    dev_printk(KERN_INFO, &pdev->dev,
               "HW Revision ID = 0x%X\n", pdev->revision);

    /* We disable the RETRY_TIMEOUT register (0x41) to keep
     * PCI Tx retries from interfering with C3 CPU state */
    pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

    err = pci_enable_msi(pdev);
    if (err)
        dev_printk(KERN_ERR, &pdev->dev,
                   "pci_enable_msi failed(0X%x)", err);

    /* TODO: Move this away, not needed if not MSI */
    /* enable rfkill interrupt: hw bug w/a */
    pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
    if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
        pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
        pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
    }

    bus->dev = &pdev->dev;
    bus->irq = pdev->irq;
    bus->ops = &bus_ops_pci;

    err = iwl_probe(bus, &trans_ops_pcie, cfg);
    if (err)
        goto out_disable_msi;
    return 0;

out_disable_msi:
    pci_disable_msi(pdev);
    pci_iounmap(pdev, pci_bus->hw_base);
out_pci_release_regions:
    pci_set_drvdata(pdev, NULL);
    pci_release_regions(pdev);
out_pci_disable_device:
    pci_disable_device(pdev);
out_no_pci:
    kfree(bus);
    return err;
}
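Two details above are easy to miss: a failed pci_enable_msi() is only logged, not fatal, and the driver then clears PCI_COMMAND_INTX_DISABLE in config space as a hardware bug workaround so rfkill interrupts still arrive. A sketch of just that command-register fixup; the helper name is made up:

#include <linux/pci.h>

static void waldo_allow_intx(struct pci_dev *pdev)
{
	u16 pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}
}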
Exemple #26
0
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = 1;
	unsigned int m = 1;
	unsigned int i;

	

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
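enic picks the interrupt mode from whichever set of queue, completion and interrupt resources the adapter actually advertises, trying MSI-X, then MSI, then INTx, and records the outcome with vnic_dev_set_intr_mode(). The shape of that decision reduced to a hypothetical fred_dev, with pci_enable_msix_range() standing in for the older pci_enable_msix() call used above:

#include <linux/pci.h>

#define FRED_MAX_VEC 4

enum fred_intr_mode { FRED_INTR_MSIX, FRED_INTR_MSI, FRED_INTR_INTX };

struct fred_dev {
	struct pci_dev *pdev;
	struct msix_entry msix_entry[FRED_MAX_VEC];
	enum fred_intr_mode mode;
};

static int fred_set_intr_mode(struct fred_dev *fred, int nvec)
{
	int i;

	if (nvec > FRED_MAX_VEC)
		return -EINVAL;

	for (i = 0; i < nvec; i++)
		fred->msix_entry[i].entry = i;

	/* Exact-count MSI-X first, then single MSI, then legacy INTx. */
	if (pci_enable_msix_range(fred->pdev, fred->msix_entry,
				  nvec, nvec) > 0) {
		fred->mode = FRED_INTR_MSIX;
		return 0;
	}

	if (!pci_enable_msi(fred->pdev)) {
		fred->mode = FRED_INTR_MSI;
		return 0;
	}

	fred->mode = FRED_INTR_INTX;
	return 0;
}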
Exemple #27
0
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk("i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over. */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish DOM0 and
			 * Linux guest here; the PVINFO write handler will
			 * handle this.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
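i915 gates MSI on a device quirk (945G/GM report the capability even though it does not work there) and, on the error path, checks pdev->msi_enabled before calling pci_disable_msi() because the enable was only attempted conditionally. A condensed sketch of that pairing; plugh_* and the quirk test are placeholders, not i915's actual helpers:

#include <linux/pci.h>

static bool plugh_msi_is_broken(struct pci_dev *pdev)
{
	/* Placeholder for a real quirk table or device-id check. */
	return false;
}

static void plugh_enable_irq_delivery(struct pci_dev *pdev)
{
	if (!plugh_msi_is_broken(pdev))
		pci_enable_msi(pdev);	/* ignore failure, INTx still works */
}

static void plugh_teardown_irq_delivery(struct pci_dev *pdev)
{
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
}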
Exemple #28
0
static int xilly_probe(struct pci_dev *pdev,
		       const struct pci_device_id *ent)
{
	struct xilly_endpoint *endpoint;
	int rc;

	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);

	if (!endpoint)
		return -ENOMEM;

	pci_set_drvdata(pdev, endpoint);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_enable_device() failed. Aborting.\n");
		return rc;
	}

	/* L0s has caused packet drops. No power saving, thank you. */

	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(endpoint->dev,
			"Incorrect BAR configuration. Aborting.\n");
		return -ENODEV;
	}

	rc = pcim_iomap_regions(pdev, 0x01, xillyname);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_iomap_regions() failed. Aborting.\n");
		return rc;
	}

	endpoint->registers = pcim_iomap_table(pdev)[0];

	pci_set_master(pdev);

	/* Set up a single MSI interrupt */
	if (pci_enable_msi(pdev)) {
		dev_err(endpoint->dev,
			"Failed to enable MSI interrupts. Aborting.\n");
		return -ENODEV;
	}
	rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
			      xillyname, endpoint);
	if (rc) {
		dev_err(endpoint->dev,
			"Failed to register MSI handler. Aborting.\n");
		return -ENODEV;
	}

	/*
	 * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
	 * even when the PCIe driver claims that a 64-bit mask is OK. On the
	 * other hand, on some architectures, 64-bit addressing is mandatory.
	 * So go for the 64-bit mask only when failing is the other option.
	 */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		endpoint->dma_using_dac = 0;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		endpoint->dma_using_dac = 1;
	} else {
		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
		return -ENODEV;
	}

	return xillybus_endpoint_discovery(endpoint);
}
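Unlike most drivers, this version of xilly_probe tries the 32-bit DMA mask first and only falls back to 64-bit addressing when 32-bit is unavailable, for the reason given in its comment. A sketch of that ordering using the newer dma_set_mask_and_coherent() helper, which is a substitution for the pci_set_dma_mask() calls shown above; the xyzzy_* name is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int xyzzy_set_dma_mask(struct pci_dev *pdev, bool *using_dac)
{
	/* Prefer 32-bit addressing; some devices mishandle 64-bit TLPs. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		*using_dac = false;
		return 0;
	}

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	return -ENODEV;
}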