Example #1
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 2 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 1 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->p1_mmio);
	adapter->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}
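
A recurring shape in these examples: request the BAR, map it, and undo in reverse order on failure. A minimal sketch of that pattern for a single BAR (all names hypothetical, not taken from any one example above):

static void __iomem *map_one_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *regs;

	if (pci_request_region(pdev, bar, "demo"))	/* reserve the BAR */
		return NULL;

	regs = ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));	/* map it */
	if (!regs)
		pci_release_region(pdev, bar);	/* undo the request on failure */

	return regs;
}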
Example #2
File: bcwc_drv.c Project: ddcc/bcwc_pcie
static int bcwc_pci_reserve_mem(struct bcwc_private *dev_priv)
{
	unsigned long start;
	unsigned long len;
	int ret;

	/* Reserve resources */
	ret = pci_request_region(dev_priv->pdev, BCWC_PCI_S2_IO, "S2 IO");
	if (ret) {
		dev_err(&dev_priv->pdev->dev, "Failed to request S2 IO\n");
		return ret;
	}

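	/* NB: if this or the following request fails, the regions already
	 * requested above are not released before returning. */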
	ret = pci_request_region(dev_priv->pdev, BCWC_PCI_S2_MEM, "S2 MEM");
	if (ret) {
		dev_err(&dev_priv->pdev->dev, "Failed to request S2 MEM\n");
		return ret;
	}

	ret = pci_request_region(dev_priv->pdev, BCWC_PCI_ISP_IO, "ISP IO");
	if (ret) {
		dev_err(&dev_priv->pdev->dev, "Failed to request ISP IO\n");
		return ret;
	}

	/* S2 IO */
	start = pci_resource_start(dev_priv->pdev, BCWC_PCI_S2_IO);
	len = pci_resource_len(dev_priv->pdev, BCWC_PCI_S2_IO);
	dev_priv->s2_io = ioremap_nocache(start, len);
	dev_priv->s2_io_len = len;

	/* S2 MEM */
	start = pci_resource_start(dev_priv->pdev, BCWC_PCI_S2_MEM);
	len = pci_resource_len(dev_priv->pdev, BCWC_PCI_S2_MEM);
	dev_priv->s2_mem = ioremap_nocache(start, len);
	dev_priv->s2_mem_len = len;

	/* ISP IO */
	start = pci_resource_start(dev_priv->pdev, BCWC_PCI_ISP_IO);
	len = pci_resource_len(dev_priv->pdev, BCWC_PCI_ISP_IO);
	dev_priv->isp_io = ioremap_nocache(start, len);
	dev_priv->isp_io_len = len;

	dev_info(&dev_priv->pdev->dev,
		 "Allocated S2 regs (BAR %d). %u bytes at 0x%p",
		 BCWC_PCI_S2_IO, dev_priv->s2_io_len, dev_priv->s2_io);

	dev_info(&dev_priv->pdev->dev,
		 "Allocated S2 mem (BAR %d). %u bytes at 0x%p",
		 BCWC_PCI_S2_MEM, dev_priv->s2_mem_len, dev_priv->s2_mem);

	dev_info(&dev_priv->pdev->dev,
		 "Allocated ISP regs (BAR %d). %u bytes at 0x%p",
		 BCWC_PCI_ISP_IO, dev_priv->isp_io_len, dev_priv->isp_io);

	pci_set_master(dev_priv->pdev);

	return 0;
}
Example #3
File: pb173.c Project: SovakPaleny/pb173
int my_probe(struct pci_dev *pdev, const struct pci_device_id *id) 
{
    printk(KERN_INFO "probe: bus->number: %2.x driver_data: %lu pci_slot: %x pci_func: %x\n", pdev->bus->number, id->driver_data, 
	PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

    int en = pci_enable_device(pdev);
    if (en < 0) {
	return en;
    }
    int req_reg = pci_request_region(pdev, 0, "res0");
    if (req_reg < 0) {
	return req_reg;
    }
    void __iomem *mem = pci_ioremap_bar(pdev, 0);
    if (!mem) {
	pci_release_region(pdev, 0);
	return -ENOMEM;
    }
    pci_set_drvdata(pdev, mem);
    
    u32 bridge_id = readl(mem);
    u32 bridge_build = readl(mem + 0x0004);
    
    printk(KERN_INFO "id: %4x build %4x\n", bridge_id, bridge_build);
    
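    /* Decode the build-timestamp register: year in bits 31:28 (offset
       from 2000), month in 27:24, day in 23:16, hour in 15:8, minute
       in 7:0. (The Czech identifiers below: rok=year, mesic=month,
       den=day, hodina=hour, minuta=minute.) */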
    int rok   = 2000 + ((bridge_build & 0xf0000000) >> 28);
    int mesic =        (bridge_build & 0x0f000000) >> 24;
    int den   =        (bridge_build & 0x00ff0000) >> 16;
    int hodina =       (bridge_build & 0x0000ff00) >> 8;
    int minuta =       (bridge_build & 0x000000ff) >> 0;
    printk(KERN_INFO "vyrobeno: rok: %i mesic: %i den: %i hodina: %i minuta %i\n", rok, mesic, den, hodina, minuta);
    printk(KERN_INFO "vyrobeno: %i.%i.%i %i:%i\n", den, mesic, rok, hodina, minuta);
    

    return 0;
}
Example #4
/**
 * map_bars - Resource allocation for device I/O Memory and I/O Port.
 *            Maps physical address of PCI buffer to virtual kernel space.
 *
 * @param l_head: List that will hold mapped BARs
 * @param pdev:   Pci device description
 * @param bars:   Bitmask of BARs to be requested
 * @param name:   Desired memory region name suffix (or NULL if none)
 *
 * @note Linked list should be freed afterwards by unmap_bars!
 *
 * @return how many BARs were mapped - in case of success.
 * @return -EBUSY                    - in case of failure.
 */
int map_bars(struct list_head *l_head, struct pci_dev *pdev, int bars, char *name)
{
  char res_name[32] = "BAR";
  bar_map_t *mem = NULL;
  bar_map_t *memP, *tmpP;
  int i, bcntr = 0;
  void __iomem *ioaddr;

  INIT_LIST_HEAD(l_head);

  for (i = 0; i < 6; i++)
    if ( (bars & (1 << i)) && (pci_resource_len(pdev, i)) ) {
      memset(&res_name[3], 0, sizeof(res_name)-3);
      snprintf(&res_name[3], sizeof(res_name)-3, "%d_%s", i, name ?: "");
      if (pci_request_region(pdev, i, res_name))
	goto err_out;
      /* we will treat I/O ports as if they were I/O memory */
      if ( !(ioaddr = pci_iomap(pdev, i, 0)) )
	goto err_out_iomap;
      if ( !(mem = kzalloc((sizeof *mem), GFP_KERNEL)) )
	goto err_out_alloc;
      mem->mem_bar   = i;
      mem->mem_pdev  = pdev;
      mem->mem_remap = ioaddr;
      mem->mem_len   = pci_resource_len(pdev, i);
      list_add_tail(&mem->mem_list/*new*/, l_head/*head*/);
      ++bcntr;
    }

  return bcntr;

  /* Error unwind (reconstructed tail -- the original listing was cut
     off here): release everything acquired so far, in reverse order. */
err_out_alloc:
  pci_iounmap(pdev, ioaddr);
err_out_iomap:
  pci_release_region(pdev, i);
err_out:
  list_for_each_entry_safe(memP, tmpP, l_head, mem_list) {
    pci_iounmap(memP->mem_pdev, memP->mem_remap);
    pci_release_region(memP->mem_pdev, memP->mem_bar);
    list_del(&memP->mem_list);
    kfree(memP);
  }
  return -EBUSY;
}
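
Per the @note in the kernel-doc, the list must later be torn down with unmap_bars(). A hypothetical caller (driver name and BAR mask assumed for illustration):

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct list_head bars;
	int n, err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* request and map BAR0 and BAR2; map_bars() initializes the list */
	n = map_bars(&bars, pdev, (1 << 0) | (1 << 2), "demo");
	if (n < 0) {
		pci_disable_device(pdev);
		return n;
	}

	/* ... use the mapped regions; free them with unmap_bars() later ... */
	return 0;
}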
Example #5
int mhi_init_pcie_device(mhi_pcie_dev_info *mhi_pcie_dev)
{
	int ret_val = 0;
	long int sleep_time = 100000;
	struct pci_dev *pcie_device =
			(struct pci_dev *)mhi_pcie_dev->pcie_device;
	/* Enable the device */
	do {
		ret_val = pci_enable_device(mhi_pcie_dev->pcie_device);
		if (0 != ret_val) {
			mhi_log(MHI_MSG_ERROR,
				"Failed to enable pcie device ret_val %d\n",
				ret_val);
			mhi_log(MHI_MSG_ERROR,
				"Sleeping for ~ %li uS, and retrying.\n",
				sleep_time);
			usleep(sleep_time);
		}
	} while (ret_val != 0);

	mhi_log(MHI_MSG_INFO, "Successfully enabled pcie device.\n");

	mhi_pcie_dev->core.bar0_base =
		(uintptr_t)ioremap_nocache(pci_resource_start(pcie_device, 0),
			pci_resource_len(pcie_device, 0));
	mhi_pcie_dev->core.bar0_end = mhi_pcie_dev->core.bar0_base +
		pci_resource_len(pcie_device, 0);
	mhi_pcie_dev->core.bar2_base =
		(uintptr_t)ioremap_nocache(pci_resource_start(pcie_device, 2),
			pci_resource_len(pcie_device, 2));
	mhi_pcie_dev->core.bar2_end = mhi_pcie_dev->core.bar2_base +
		pci_resource_len(pcie_device, 2);

	if (0 == mhi_pcie_dev->core.bar0_base) {
		mhi_log(MHI_MSG_ERROR,
			"Failed to register for pcie resources\n");
		goto mhi_pcie_read_ep_config_err;
	}

	mhi_log(MHI_MSG_INFO, "Device BAR0 address is at 0x%llx\n",
			mhi_pcie_dev->core.bar0_base);
	ret_val = pci_request_region(pcie_device, 0, mhi_pcie_driver.name);
	if (ret_val)
		mhi_log(MHI_MSG_ERROR, "Could not request BAR0 region\n");

	mhi_pcie_dev->core.manufact_id = pcie_device->vendor;
	mhi_pcie_dev->core.dev_id = pcie_device->device;

	if (mhi_pcie_dev->core.manufact_id != MHI_PCIE_VENDOR_ID ||
			mhi_pcie_dev->core.dev_id != MHI_PCIE_DEVICE_ID) {
		mhi_log(MHI_MSG_ERROR, "Incorrect device/manufacturer ID\n");
		goto mhi_device_list_error;
	}
	/* We need to ensure that the link is stable before we kick off MHI */
	return 0;
mhi_device_list_error:
	pci_disable_device(pcie_device);
mhi_pcie_read_ep_config_err:
	return -EIO;
}
Example #6
static int __devinit tc86c001_init_one(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto out;

	rc = pci_request_region(dev, 5, DRV_NAME);
	if (rc) {
		printk(KERN_ERR DRV_NAME ": system control regs already in use");
		goto out_disable;
	}

	rc = ide_pci_init_one(dev, &tc86c001_chipset, NULL);
	if (rc)
		goto out_release;

	goto out;

out_release:
	pci_release_region(dev, 5);
out_disable:
	pci_disable_device(dev);
out:
	return rc;
}
Example #7
/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int rc;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
	} else {
		/*
		 * The virtio ring base address is expressed as a 32-bit PFN,
		 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
		 */
		dma_set_coherent_mask(&pci_dev->dev,
				DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
	}

	if (rc)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
	if (rc)
		return rc;

	rc = -ENOMEM;
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!vp_dev->ioaddr)
		goto err_iomap;

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id.  this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	vp_dev->vdev.config = &virtio_pci_config_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_iomap:
	pci_release_region(pci_dev, 0);
	return rc;
}
Example #8
/*************************************************************************/ /*!
@Function       OSPCIResumeDev
@Description    Prepare a PCI device to be resumed by power management
@Input          hPVRPCI                 PCI device handle
@Return	        PVRSRV_ERROR	        Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
	int err;
	int i;

	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
	switch(err)
	{
		case 0:
			break;
		case -EIO:
			printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM");
			break;
		case -EINVAL:
			printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state");
			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
		default:
			printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err);
			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
	pci_restore_state(psPVRPCI->psPCIDev);
#else
	err = pci_restore_state(psPVRPCI->psPCIDev);
	if (err != 0)
	{
		printk(KERN_ERR "OSPCIResumeDev: pci_restore_state failed (%d)", err);
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}
#endif
	err = pci_enable_device(psPVRPCI->psPCIDev);
	if (err != 0)
	{
		printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err);
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}

	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
		pci_set_master(psPVRPCI->psPCIDev);

	/* Restore the PCI resource tracking array */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
	{
		if (psPVRPCI->abPCIResourceInUse[i])
		{
			err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
			if (err != 0)
			{
				printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err);
			}
		}
	}

	return PVRSRV_OK;
}
Example #9
static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
							const char *name)
{
	int err = pci_request_region(dev, 5, name);

	if (err)
		printk(KERN_ERR "%s: system control regs already in use", name);
	return err;
}
static int __init cs5535_gpio_probe(struct pci_dev *pdev,
		const struct pci_device_id *pci_id)
{
	int err;
	ulong mask_orig = mask;

	/* There are two ways to get the GPIO base address; one is by
	 * fetching it from MSR_LBAR_GPIO, the other is by reading the
	 * PCI BAR info.  The latter method is easier (especially across
	 * different architectures), so we'll stick with that for now.  If
	 * it turns out to be unreliable in the face of crappy BIOSes, we
	 * can always go back to using MSRs.. */

	err = pci_enable_device_io(pdev);
	if (err) {
		dev_err(&pdev->dev, "can't enable device IO\n");
		goto done;
	}

	err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
		goto done;
	}

	/* set up the driver-specific struct */
	cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
	cs5535_gpio_chip.pdev = pdev;
	spin_lock_init(&cs5535_gpio_chip.lock);

	dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
			(unsigned long long) cs5535_gpio_chip.base);

	/* mask out reserved pins */
	mask &= 0x1F7FFFFF;

	/* do not allow pin 28, Power Button, as there's special handling
	 * in the PMC needed. (note 12, p. 48) */
	mask &= ~(1 << 28);

	if (mask_orig != mask)
		dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n",
				mask_orig, mask);

	/* finally, register with the generic GPIO API */
	err = gpiochip_add(&cs5535_gpio_chip.chip);
	if (err)
		goto release_region;

	dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
	return 0;

release_region:
	pci_release_region(pdev, GPIO_BAR);
done:
	return err;
}
Example #11
/*************************************************************************/ /*!
@Function       OSPCIAddrRangeFunc
@Description    Internal support function for various address range related 
                functions
@Input          eFunc                   Function to perform
@Input          hPVRPCI                 PCI device handle
@Input          ui32Index               Address range index
@Return		IMG_UINT32              Function dependent value
*/ /**************************************************************************/
static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
				     PVRSRV_PCI_DEV_HANDLE hPVRPCI,
				     IMG_UINT32 ui32Index)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;

	if (ui32Index >= DEVICE_COUNT_RESOURCE)
	{
		printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
		return 0;
	}

	switch (eFunc)
	{
		case HOST_PCI_ADDR_RANGE_FUNC_LEN:
		{
			return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
		}
		case HOST_PCI_ADDR_RANGE_FUNC_START:
		{
			return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
		}
		case HOST_PCI_ADDR_RANGE_FUNC_END:
		{
			return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
		}
		case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
		{
			int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
			if (err != 0)
			{
				printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err);
				return 0;
			}
			psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
			return 1;
		}
		case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
		{
			if (psPVRPCI->abPCIResourceInUse[ui32Index])
			{
				pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
				psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
			}
			return 1;
		}
		default:
		{
			printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
			break;
		}
	}

	return 0;
}
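
The driver presumably exposes thin wrappers over this dispatcher; a hypothetical one for the REQUEST case (name assumed, not confirmed from the original source):

static IMG_UINT32 OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
					IMG_UINT32 ui32Index)
{
	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
				  hPVRPCI, ui32Index);
}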
Example #12
static int asd_map_memio(struct asd_ha_struct *asd_ha)
{
	int err, i;
	struct asd_ha_addrspace *io_handle;

	asd_ha->iospace = 0;
	for (i = 0; i < 3; i += 2) {
		io_handle = &asd_ha->io_handle[i==0?0:1];
		io_handle->start = pci_resource_start(asd_ha->pcidev, i);
		io_handle->len   = pci_resource_len(asd_ha->pcidev, i);
		io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
		err = -ENODEV;
		if (!io_handle->start || !io_handle->len) {
			asd_printk("MBAR%d start or length for %s is 0.\n",
				   i==0?0:1, pci_name(asd_ha->pcidev));
			goto Err;
		}
		err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
		if (err) {
			asd_printk("couldn't reserve memory region for %s\n",
				   pci_name(asd_ha->pcidev));
			goto Err;
		}
		if (io_handle->flags & IORESOURCE_CACHEABLE)
			io_handle->addr = ioremap(io_handle->start,
						  io_handle->len);
		else
			io_handle->addr = ioremap_nocache(io_handle->start,
							  io_handle->len);
		if (!io_handle->addr) {
			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
				   pci_name(asd_ha->pcidev));
			err = -ENOMEM;
			goto Err_unreq;
		}
	}

	return 0;
Err_unreq:
	pci_release_region(asd_ha->pcidev, i);
Err:
	if (i > 0) {
		io_handle = &asd_ha->io_handle[0];
		iounmap(io_handle->addr);
		pci_release_region(asd_ha->pcidev, 0);
	}
	return err;
}
Example #13
static inline __must_check int allocQueues(struct xordev *dev) {
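  /* TRY_PTR()/TRY_NORES() are project-local helper macros (defined
     elsewhere in xordev): they presumably evaluate the call, store the
     result, log the given message, and take the OR_GOTO() action on
     failure. */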
  dev->dmaSource1 = NULL;
  dev->dmaSource2 = NULL;
  dev->dmaDestination = NULL;
  dev->source1 = NULL;
  dev->source2 = NULL;
  dev->destination = NULL;
  dev->dmaSize = NULL;
  dev->deviceState = NULL;
  dev->deviceStateSpinlock = NULL;
  dev->waitSource1 = NULL;
  dev->waitSource2 = NULL;
  dev->waitDestination = NULL;
  pci_set_master(dev->pciDev);
  TRY_NORES(OR_GOTO(fail), pci_set_dma_mask(dev->pciDev, DMA_BIT_MASK(32)), "set dma mask");
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1PciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2PciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestinationPciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaSource1PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaSource2PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestination, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaDestinationPciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSize, kmalloc(sizeof(size_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->deviceState, kmalloc(sizeof(int), GFP_KERNEL));
  *dev->deviceState = DEVICE_UNOCCUPIED;
  TRY_PTR(OR_GOTO(fail), dev->deviceStateSpinlock, kmalloc(sizeof(spinlock_t), GFP_KERNEL));
  spin_lock_init(dev->deviceStateSpinlock);
  /* wait queue heads need sizeof(wait_queue_head_t), not
     sizeof(wait_queue_t), which is the per-waiter entry type */
  TRY_PTR(OR_GOTO(fail), dev->waitSource1, kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource1);
  TRY_PTR(OR_GOTO(fail), dev->waitSource2, kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource2);
  TRY_PTR(OR_GOTO(fail), dev->waitDestination, kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitDestination);
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source1), "create source1 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source2), "create source2 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->destination), "create destination memory queue");
  TRY_NORES(OR_GOTO(fail), pci_request_region(dev->pciDev, 0, "xordev"), "request BAR0");
  TRY_PTR(OR_GOTO(fail), dev->bar0, pci_iomap(dev->pciDev, 0, BAR0_SIZE), "map pci iomem");
  return 0;
  fail:
  memfifoDelete(&dev->destination);
  memfifoDelete(&dev->source2);
  memfifoDelete(&dev->source1);
  return -ENOMEM;
}
/* enable SRAM if sram detected */
static void sram_init(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	void __iomem		*base = NULL;
	void __iomem		*addr = NULL;

	if (!hcd->has_sram)
		return;
	ehci->sram_addr = pci_resource_start(pdev, 1);
	ehci->sram_size = pci_resource_len(pdev, 1);
	ehci_info(ehci, "Found HCD SRAM at %x size:%x\n",
		ehci->sram_addr, ehci->sram_size);

	if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
		ehci_warn(ehci, "SRAM request failed\n");
		hcd->has_sram = 0;
		return;
	} else if (!dma_declare_coherent_memory(&pdev->dev, ehci->sram_addr,
			ehci->sram_addr, ehci->sram_size, DMA_MEMORY_MAP)) {
		ehci_warn(ehci, "SRAM DMA declare failed\n");
		pci_release_region(pdev, 1);
		hcd->has_sram = 0;
		return;
	}

	/* initialize SRAM to 0 to avoid ECC errors during entry into D0 */
	base = ioremap_nocache(ehci->sram_addr, ehci->sram_size);
	if (base == NULL) {
		ehci_warn(ehci, "SRAM init: ioremap failed\n");
		return;
	}

	addr = base;

	while (addr < base + ehci->sram_size) {
		writel(0x0, addr);
		addr = addr + 4;
	}

	iounmap(base);
}
Example #15
static int mem_alloc(struct pci_dev *pdev, uintptr_t *phy_addr,
		void **virt_addr, int bar)
{
	void __iomem *mem;
	int ret = 0;
	unsigned long start = 0, len;

	/* dedicate isolated memory region */
	start = pci_resource_start(pdev, bar);
	len = pci_resource_len(pdev, bar);
	if (!start || !len) {
		dev_err(&pdev->dev, "bar %d address not set\n", bar);
		ret = -EINVAL;
		goto err;
	}

	ret = pci_request_region(pdev, bar, "intel_mcu");
	if (ret) {
		dev_err(&pdev->dev,
				"failed to request psh region 0x%lx-0x%lx\n",
				start,
				(unsigned long)pci_resource_end(pdev, bar));
		goto err;
	}

	mem = ioremap_nocache(start, len);
	if (!mem) {
		dev_err(&pdev->dev, "can not ioremap app imr address\n");
		ret = -EINVAL;
		goto err_ioremap;
	}

	*phy_addr = start;
	*virt_addr = (void *)mem;
	return 0;

err_ioremap:
	pci_release_region(pdev, bar);
err:
	return ret;
}
static int serial_hsu_pci_port_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct uart_hsu_port *up;
	int ret, port, hw_type;
	resource_size_t start, len;

	start = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	dev_info(&pdev->dev,
		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
		PCI_FUNC(pdev->devfn), ent->driver_data,
		(unsigned long) start, (unsigned long) len);

	port = intel_mid_hsu_func_to_port(PCI_FUNC(pdev->devfn));
	if (port == -1)
		return 0;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_region(pdev, 0, "hsu");
	if (ret)
		goto err;

	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
			pdev->irq);
	if (IS_ERR(up))
		goto err;

	pci_set_drvdata(pdev, up);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;
err:
	pci_disable_device(pdev);
	return ret;
}
static int serial_hsu_pci_dma_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct hsu_dma_chan *dchan;
	int ret, share_irq = 0;
	resource_size_t start, len;

	start = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	dev_info(&pdev->dev,
		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
		PCI_FUNC(pdev->devfn), ent->driver_data,
		(unsigned long) pci_resource_start(pdev, 0),
		(unsigned long) pci_resource_len(pdev, 0));

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_region(pdev, 0, "hsu dma");
	if (ret)
		goto err;

	/* share irq with port? ANN all and TNG chip from B0 stepping */
	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER &&
		pdev->revision >= 0x1) ||
		intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
		share_irq = 1;

	ret = serial_hsu_dma_setup(&pdev->dev, start, len, pdev->irq, share_irq);
	if (ret)
		goto err;

	return 0;
err:
	pci_disable_device(pdev);
	return ret;
}
Example #18
static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
{
	int i = PCI_IOBAR_OFFSET, err;
	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];

	asd_ha->iospace = 1;
	io_handle->start = pci_resource_start(asd_ha->pcidev, i);
	io_handle->len   = pci_resource_len(asd_ha->pcidev, i);
	io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
	io_handle->addr  = (void __iomem *) io_handle->start;
	if (!io_handle->start || !io_handle->len) {
		asd_printk("couldn't get IO ports for %s\n",
			   pci_name(asd_ha->pcidev));
		return -ENODEV;
	}
	err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
	if (err) {
		asd_printk("couldn't reserve io space for %s\n",
			   pci_name(asd_ha->pcidev));
	}

	return err;
}
Example #19
static int piix4_poweroff_probe(struct pci_dev *dev,
				const struct pci_device_id *id)
{
	int res;

	if (pm_dev)
		return -EINVAL;

	/* Request access to the PIIX4 PM IO registers */
	res = pci_request_region(dev, piix4_pm_io_region,
				 "PIIX4 PM IO registers");
	if (res) {
		dev_err(&dev->dev, "failed to request PM IO registers: %d\n",
			res);
		return res;
	}

	pm_dev = dev;
	io_offset = pci_resource_start(dev, piix4_pm_io_region);
	pm_power_off = piix4_poweroff;

	return 0;
}
Example #20
void	*FindPCI1750Card(void)
{
	int		iFound = 0;
	struct pci_dev	*pDevice = NULL;

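	/* Note: pcibios_present() and pci_find_device() are legacy 2.4-era
	 * interfaces; modern kernels use pci_get_device() instead. */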
	if (pcibios_present())
	{
		while((pDevice = pci_find_device(0x13fe, 1750, pDevice)))
		{
			pci_setup_device(pDevice);

			if (pci_request_region(pDevice, 2, "plc_labcard") == 0)
				iFound = 1;

			break;
		}
	}


	if (iFound)
		return ((void *)pDevice);
	
	return NULL;
}
Example #21
static int __devinit platform_pci_init(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	int i, ret;
	long ioaddr;
	long mmio_addr, mmio_len;
	unsigned int max_nr_gframes;

	i = pci_enable_device(pdev);
	if (i)
		return i;

	ioaddr = pci_resource_start(pdev, 0);

	mmio_addr = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);

	if (mmio_addr == 0 || ioaddr == 0) {
		dev_err(&pdev->dev, "no resources found\n");
		ret = -ENOENT;
		goto pci_out;
	}

	ret = pci_request_region(pdev, 1, DRV_NAME);
	if (ret < 0)
		goto pci_out;

	ret = pci_request_region(pdev, 0, DRV_NAME);
	if (ret < 0)
		goto mem_out;

	platform_mmio = mmio_addr;
	platform_mmiolen = mmio_len;

	if (!xen_have_vector_callback) {
		ret = xen_allocate_irq(pdev);
		if (ret) {
			dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
			goto out;
		}
		callback_via = get_callback_via(pdev);
		ret = xen_set_callback_via(callback_via);
		if (ret) {
			dev_warn(&pdev->dev, "Unable to set the evtchn callback "
					 "err=%d\n", ret);
			goto out;
		}
	}

	max_nr_gframes = gnttab_max_grant_frames();
	xen_hvm_resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
	ret = gnttab_init();
	if (ret)
		goto out;
	xenbus_probe(NULL);
	return 0;

out:
	pci_release_region(pdev, 0);
mem_out:
	pci_release_region(pdev, 1);
pci_out:
	pci_disable_device(pdev);
	return ret;
}
Example #22
File: chipsfb.c Project: 7799/linux
static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
{
	struct fb_info *p;
	unsigned long addr, size;
	unsigned short cmd;
	int rc = -ENODEV;

	if (pci_enable_device(dp) < 0) {
		dev_err(&dp->dev, "Cannot enable PCI device\n");
		goto err_out;
	}

	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
		goto err_disable;
	addr = pci_resource_start(dp, 0);
	size = pci_resource_len(dp, 0);
	if (addr == 0)
		goto err_disable;

	p = framebuffer_alloc(0, &dp->dev);
	if (p == NULL) {
		dev_err(&dp->dev, "Cannot allocate framebuffer structure\n");
		rc = -ENOMEM;
		goto err_disable;
	}

	if (pci_request_region(dp, 0, "chipsfb") != 0) {
		dev_err(&dp->dev, "Cannot request framebuffer\n");
		rc = -EBUSY;
		goto err_release_fb;
	}

#ifdef __BIG_ENDIAN
	addr += 0x800000;	// Use big-endian aperture
#endif

	/* we should use pci_enable_device here, but,
	   the device doesn't declare its I/O ports in its BARs
	   so pci_enable_device won't turn on I/O responses */
	pci_read_config_word(dp, PCI_COMMAND, &cmd);
	cmd |= 3;	/* enable memory and IO space */
	pci_write_config_word(dp, PCI_COMMAND, cmd);

#ifdef CONFIG_PMAC_BACKLIGHT
	/* turn on the backlight */
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		pmac_backlight->props.power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
#endif /* CONFIG_PMAC_BACKLIGHT */

#ifdef CONFIG_PPC
	p->screen_base = __ioremap(addr, 0x200000, _PAGE_NO_CACHE);
#else
	p->screen_base = ioremap(addr, 0x200000);
#endif
	if (p->screen_base == NULL) {
		dev_err(&dp->dev, "Cannot map framebuffer\n");
		rc = -ENOMEM;
		goto err_release_pci;
	}

	pci_set_drvdata(dp, p);

	init_chips(p, addr);

	if (register_framebuffer(p) < 0) {
		dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
		goto err_unmap;
	}

	dev_info(&dp->dev,"fb%d: Chips 65550 frame buffer"
		 " (%dK RAM detected)\n",
		 p->node, p->fix.smem_len / 1024);

	return 0;

 err_unmap:
	iounmap(p->screen_base);
 err_release_pci:
	pci_release_region(dp, 0);
 err_release_fb:
	framebuffer_release(p);
 err_disable:
 err_out:
	return rc;
}
Example #23
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mem;
	struct ath_softc *sc;
	struct ieee80211_hw *hw;
	u8 csz;
	u32 val;
	int ret = 0;
	char hw_name[64];

	if (pci_enable_device(pdev))
		return -EIO;

	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
		goto err_dma;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA consistent "
			"DMA enable failed\n");
		goto err_dma;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES / sizeof(u32);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	ret = pci_request_region(pdev, 0, "ath9k");
	if (ret) {
		dev_err(&pdev->dev, "PCI memory region reserve error\n");
		ret = -ENODEV;
		goto err_region;
	}

	mem = pci_iomap(pdev, 0, 0);
	if (!mem) {
		printk(KERN_ERR "PCI memory map error\n") ;
		ret = -EIO;
		goto err_iomap;
	}

	hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
	if (!hw) {
		dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
		ret = -ENOMEM;
		goto err_alloc_hw;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	sc = hw->priv;
	sc->hw = hw;
	sc->dev = &pdev->dev;
	sc->mem = mem;

	/* Will be cleared in ath9k_start() */
	sc->sc_flags |= SC_OP_INVALID;

	ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_irq;
	}

	sc->irq = pdev->irq;

	ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize device\n");
		goto err_init;
	}

	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
		   hw_name, (unsigned long)mem, pdev->irq);

	return 0;

err_init:
	free_irq(sc->irq, sc);
err_irq:
	ieee80211_free_hw(hw);
err_alloc_hw:
	pci_iounmap(pdev, mem);
err_iomap:
	pci_release_region(pdev, 0);
err_region:
	/* Nothing */
err_dma:
	pci_disable_device(pdev);
	return ret;
}
Example #24
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
Example #25
int do_setup_ddr(struct device *dev)
{
	struct psh_ia_priv *ia_data =
			(struct psh_ia_priv *)dev_get_drvdata(dev);
	struct psh_plt_priv *plt_priv =
			(struct psh_plt_priv *)ia_data->platform_priv;
	uintptr_t ddr_phy = plt_priv->ddr_phy;
	uintptr_t imr2_phy = plt_priv->imr2_phy;
	const struct firmware *fw_entry;
	struct ia_cmd cmd_user = {
		.cmd_id = CMD_SETUP_DDR,
		.sensor_id = 0,
		};
	static int fw_load_done;
	int load_default = 0;
	char fname[40];

	if (fw_load_done)
		return 0;

#ifdef VPROG2_SENSOR
	intel_scu_ipc_msic_vprog2(1);
	msleep(500);
#endif
again:
	if (!request_firmware(&fw_entry, fname, dev)) {
		if (!fw_entry)
			return -ENOMEM;

		psh_debug("psh fw size %d virt:0x%p\n",
				(int)fw_entry->size, fw_entry->data);
		if (fw_entry->size > APP_IMR_SIZE) {
			psh_err("psh fw size too big\n");
		} else {
			struct ia_cmd cmd = {
				.cmd_id = CMD_RESET,
				.sensor_id = 0,
				};

			memcpy(plt_priv->imr2, fw_entry->data,
				fw_entry->size);
			*(uintptr_t *)(&cmd.param) = imr2_phy;
			cmd.tran_id = 0x1;
			if (process_send_cmd(ia_data, PSH2IA_CHANNEL3, &cmd, 7))
				return -1;
			ia_data->load_in_progress = 1;
			wait_for_completion_timeout(&ia_data->cmd_load_comp,
					3 * HZ);
			fw_load_done = 1;
		}
		release_firmware(fw_entry);
	} else {
		psh_err("cannot find psh firmware(%s)\n", fname);
		if (!load_default) {
			psh_err("try to load default psh.bin\n");
			snprintf(fname, 20, "psh.bin");
			load_default = 1;
			goto again;
		}
	}
	ia_lbuf_read_reset(ia_data->lbuf);
	*(unsigned long *)(&cmd_user.param) = ddr_phy;
	return ia_send_cmd(ia_data, &cmd_user, 7);
}

static void psh2ia_channel_handle(u32 msg, u32 param, void *data)
{
	struct pci_dev *pdev = (struct pci_dev *)data;
	struct psh_ia_priv *ia_data =
			(struct psh_ia_priv *)dev_get_drvdata(&pdev->dev);
	struct psh_plt_priv *plt_priv =
			(struct psh_plt_priv *)ia_data->platform_priv;
	u8 *dbuf = NULL;
	u16 size = 0;

	if (unlikely(ia_data->load_in_progress)) {
		ia_data->load_in_progress = 0;
		complete(&ia_data->cmd_load_comp);
		return;
	}

	while (!ia_lbuf_read_next(ia_data,
			&plt_priv->lbuf, &dbuf, &size)) {
		ia_handle_frame(ia_data, dbuf, size);
	}
	sysfs_notify(&pdev->dev.kobj, NULL, "data_size");
}

static int psh_imr_init(struct pci_dev *pdev,
			int imr_src, uintptr_t *phy_addr, void **virt_addr,
			unsigned size, int bar)
{
	struct page *pg;
	void __iomem *mem;
	int ret = 0;
	unsigned long start = 0, len;

	if (imr_src == imr_allocate) {
		/* dynamic alloct memory region */
		pg = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO,
						get_order(size));
		if (!pg) {
			dev_err(&pdev->dev, "can not allocate app page imr buffer\n");
			ret = -ENOMEM;
			goto err;
		}
		*phy_addr = page_to_phys(pg);
		*virt_addr = page_address(pg);
	} else if (imr_src == imr_pci_shim) {
		/* dedicate isolated memory region */
		start = pci_resource_start(pdev, bar);
		len = pci_resource_len(pdev, bar);
		if (!start || !len) {
			dev_err(&pdev->dev, "bar %d address not set\n", bar);
			ret = -EINVAL;
			goto err;
		}

		ret = pci_request_region(pdev, bar, "psh");
		if (ret) {
			dev_err(&pdev->dev, "failed to request psh region "
				"0x%lx-0x%lx\n", start,
				(unsigned long)pci_resource_end(pdev, bar));
			goto err;
		}

		mem = ioremap_nocache(start, len);
		if (!mem) {
			dev_err(&pdev->dev, "can not ioremap app imr address\n");
			ret = -EINVAL;
			goto err_ioremap;
		}

		*phy_addr = start;
		*virt_addr = (void *)mem;
	} else {
		dev_err(&pdev->dev, "Invalid chip imr source\n");
		ret = -EINVAL;
		goto err;
	}

	return 0;

err_ioremap:
	pci_release_region(pdev, bar);
err:
	return ret;
}

static void psh_imr_free(int imr_src, void *virt_addr, unsigned size)
{
	if (imr_src == imr_allocate)
		__free_pages(virt_to_page(virt_addr), get_order(size));
	else if (imr_src == imr_pci_shim)
		iounmap((void __iomem *)virt_addr);
}

static int psh_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = -1;
	struct psh_ia_priv *ia_data;
	struct psh_plt_priv *plt_priv;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "fail to enable psh pci device\n");
		goto pci_err;
	}

	plt_priv = kzalloc(sizeof(*plt_priv), GFP_KERNEL);
	if (!plt_priv) {
		dev_err(&pdev->dev, "can not allocate plt_priv\n");
		ret = -ENOMEM;
		goto plt_err;
	}

	switch (intel_mid_identify_cpu()) {
	case INTEL_MID_CPU_CHIP_TANGIER:
		if (intel_mid_soc_stepping() == 0)
			plt_priv->imr_src = imr_allocate;
		else
			plt_priv->imr_src = imr_pci_shim;
		break;
	case INTEL_MID_CPU_CHIP_ANNIEDALE:
		plt_priv->imr_src = imr_pci_shim;
		break;
	default:
		dev_err(&pdev->dev, "error memory region\n");
		ret = -EINVAL;
		goto psh_imr2_err;
	}

	/* init IMR2 */
	ret = psh_imr_init(pdev, plt_priv->imr_src,
				&plt_priv->imr2_phy, &plt_priv->imr2,
				APP_IMR_SIZE, 0);
	if (ret)
		goto psh_imr2_err;


	/* init IMR3 */
	ret = psh_imr_init(pdev, plt_priv->imr_src,
				&plt_priv->ddr_phy, &plt_priv->ddr,
				BUF_IA_DDR_SIZE, 1);
	if (ret)
		goto psh_ddr_err;

	ret = psh_ia_common_init(&pdev->dev, &ia_data);
	if (ret) {
		dev_err(&pdev->dev, "fail to init psh_ia_common\n");
		goto psh_ia_err;
	}

	ia_lbuf_read_init(&plt_priv->lbuf,
				plt_priv->ddr,
				BUF_IA_DDR_SIZE, NULL);
	ia_data->lbuf = &plt_priv->lbuf;

	plt_priv->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(plt_priv->hwmon_dev)) {
		/* hwmon_device_register() returns ERR_PTR on failure */
		dev_err(&pdev->dev, "fail to register hwmon device\n");
		ret = PTR_ERR(plt_priv->hwmon_dev);
		goto hwmon_err;
	}

	ia_data->platform_priv = plt_priv;

	ret = intel_psh_ipc_bind(PSH_RECV_CH0, psh2ia_channel_handle, pdev);
	if (ret) {
		dev_err(&pdev->dev, "fail to bind channel\n");
		goto irq_err;
	}

	/* just put this dev into suspend status always, since this is fake */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

irq_err:
	hwmon_device_unregister(plt_priv->hwmon_dev);
hwmon_err:
	psh_ia_common_deinit(&pdev->dev);
psh_ia_err:
	psh_imr_free(plt_priv->imr_src, plt_priv->ddr, BUF_IA_DDR_SIZE);
psh_ddr_err:
	psh_imr_free(plt_priv->imr_src, plt_priv->imr2, APP_IMR_SIZE);
psh_imr2_err:
	kfree(plt_priv);
plt_err:
	pci_dev_put(pdev);
pci_err:
	return ret;
}
Example #26
File: mcb-pci.c Project: 7799/linux
static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct priv *priv;
	phys_addr_t mapbase;
	int ret;
	int num_cells;
	unsigned long flags;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}

	mapbase = pci_resource_start(pdev, 0);
	if (!mapbase) {
		dev_err(&pdev->dev, "No PCI resource\n");
		goto err_start;
	}

	ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request PCI BARs\n");
		goto err_start;
	}

	priv->base = pci_iomap(pdev, 0, 0);
	if (!priv->base) {
		dev_err(&pdev->dev, "Cannot ioremap\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	flags = pci_resource_flags(pdev, 0);
	if (flags & IORESOURCE_IO) {
		ret = -ENOTSUPP;
		dev_err(&pdev->dev,
			"IO mapped PCI devices are not supported\n");
		goto err_ioremap;
	}

	pci_set_drvdata(pdev, priv);

	priv->bus = mcb_alloc_bus();

	ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
	if (ret < 0)
		goto err_drvdata;
	num_cells = ret;

	dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);

	mcb_bus_add_devices(priv->bus);

	return 0;

err_drvdata:
	pci_iounmap(pdev, priv->base);
err_ioremap:
	pci_release_region(pdev, 0);
err_start:
	pci_disable_device(pdev);
	return ret;
}
static int __devinit atomisp_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *id)
{
	struct atomisp_device *isp = NULL;
	unsigned int start, len;
	void __iomem *base = NULL;
	int err = 0;

	if (!dev) {
		v4l2_err(&atomisp_dev, "atomisp: erorr device ptr\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	err = pci_enable_device(dev);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to enable CI ISP device\n");
		return err;
	}

	start = pci_resource_start(dev, 0);
	len = pci_resource_len(dev, 0);

	err = pci_request_region(dev, 0, atomisp_pci_driver.name);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to request region 0x%1x-0x%Lx\n",
			    start, (unsigned long long)pci_resource_end(dev,
				0));
		goto request_region_fail;
	}

	base = ioremap_nocache(start, len);
	if (!base) {
		v4l2_err(&atomisp_dev,
			    "Failed to I/O memory remapping\n");
		err = -ENOMEM;
		goto ioremap_fail;
	}

	isp = kzalloc(sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		v4l2_err(&atomisp_dev, "Failed to alloc CI ISP structure\n");
		goto kzalloc_fail;
	}
	isp->sw_contex.probed = false;
	isp->sw_contex.init = false;
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->hw_contex.pci_root = pci_get_bus_and_slot(0, 0);

	/* Load isp firmware from user space */
	/*
	 * fixing me:
	 * MRFLD VP does not use firmware loading
	 * from file system
	 */
	if (!IS_MRFLD) {
		isp->firmware = load_firmware(&dev->dev);
		if (!isp->firmware) {
			v4l2_err(&atomisp_dev, "Load firmware failed\n");
			err = -ENOENT;
			goto load_fw_fail;
		}
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_initialize_modules\n");
		goto init_mod_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_register_entities failed\n");
		goto init_mod_fail;
	}

	init_completion(&isp->wq_frame_complete);
	init_completion(&isp->dis_state_complete);
	spin_lock_init(&isp->irq_lock);

	isp->work_queue = create_singlethread_workqueue(isp->v4l2_dev.name);
	if (isp->work_queue == NULL) {
		v4l2_err(&atomisp_dev, "Failed to initialize work queue\n");
		err = -ENOMEM;
		goto work_queue_fail;
	}
	INIT_WORK(&isp->work, atomisp_work);

	isp->hw_contex.ispmmadr = start;

	pci_set_master(dev);
	atomisp_io_base = base;

	isp->tvnorm = tvnorms;
	mutex_init(&isp->input_lock);
	/* isp_lock is to protect race access of css functions */
	mutex_init(&isp->isp_lock);
	isp->sw_contex.updating_uptr = false;
	isp->isp3a_stat_ready = false;

	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to enable msi\n");
		goto enable_msi_fail;
	}
	err = request_irq(dev->irq, atomisp_isr,
			  IRQF_SHARED, "isp_irq", isp);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to request irq\n");
		goto request_irq_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt_wakeup_dog, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	/*
	 * fixing me!
	 * MRFLD VP does not implement
	 * PM Core
	 */
#ifdef CONFIG_PM
	if (!IS_MRFLD) {
		pm_runtime_put_noidle(&dev->dev);
		pm_runtime_allow(&dev->dev);
	}
#endif
	isp->sw_contex.probed = true;

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		v4l2_err(&atomisp_dev,
			    "Failed to register reserved memory pool.\n");

	return 0;

request_irq_fail:
	pci_disable_msi(dev);
enable_msi_fail:
	pci_set_drvdata(dev, NULL);
	destroy_workqueue(isp->work_queue);
work_queue_fail:
	atomisp_unregister_entities(isp);
init_mod_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	kfree(isp);
kzalloc_fail:
	iounmap(base);
ioremap_fail:
	pci_release_region(dev, 0);
request_region_fail:
	pci_disable_device(dev);
	return err;
}
Example #28
File: pci.c Project: ANFS/ANFS-kernel
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems.  It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	/* Enable bus mastering */
	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	ret = pci_request_region(pdev, 0, "ath5k");
	if (ret) {
		dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
		goto err_dis;
	}

	mem = pci_iomap(pdev, 0, 0);
	if (!mem) {
		dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
		ret = -EIO;
		goto err_reg;
	}

	/*
	 * Allocate hw (mac80211 main struct)
	 * and hw->priv (driver private data)
	 */
Example #29
static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent)
{
    struct saa7146_pci_extension_data *pci_ext = (struct saa7146_pci_extension_data *)ent->driver_data;
    struct saa7146_extension *ext = pci_ext->ext;
    struct saa7146_dev *dev;
    int err = -ENOMEM;

    /* clear out mem for sure */
    dev = kzalloc(sizeof(struct saa7146_dev), GFP_KERNEL);
    if (!dev) {
        ERR(("out of memory.\n"));
        goto out;
    }

    DEB_EE(("pci:%p\n",pci));

    err = pci_enable_device(pci);
    if (err < 0) {
        ERR(("pci_enable_device() failed.\n"));
        goto err_free;
    }

    /* enable bus-mastering */
    pci_set_master(pci);

    dev->pci = pci;

    /* get chip-revision; this is needed to enable bug-fixes */
    err = pci_read_config_dword(pci, PCI_CLASS_REVISION, &dev->revision);
    if (err < 0) {
        ERR(("pci_read_config_dword() failed.\n"));
        goto err_disable;
    }
    dev->revision &= 0xf;

    /* remap the memory from virtual to physical address */

    err = pci_request_region(pci, 0, "saa7146");
    if (err < 0)
        goto err_disable;

    dev->mem = ioremap(pci_resource_start(pci, 0),
                       pci_resource_len(pci, 0));
    if (!dev->mem) {
        ERR(("ioremap() failed.\n"));
        err = -ENODEV;
        goto err_release;
    }

    /* we don't do a master reset here anymore, it screws up
       some boards that don't have an i2c-eeprom for configuration
       values */
    /*
    	saa7146_write(dev, MC1, MASK_31);
    */

    /* disable all irqs */
    saa7146_write(dev, IER, 0);

    /* shut down all dma transfers and rps tasks */
    saa7146_write(dev, MC1, 0x30ff0000);

    /* clear out any rps-signals pending */
    saa7146_write(dev, MC2, 0xf8000000);

    /* request an interrupt for the saa7146 */
    err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED,
                      dev->name, dev);
    if (err < 0) {
        ERR(("request_irq() failed.\n"));
        goto err_unmap;
    }

    err = -ENOMEM;

    /* get memory for various stuff */
    dev->d_rps0.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
                           &dev->d_rps0.dma_handle);
    if (!dev->d_rps0.cpu_addr)
        goto err_free_irq;
    memset(dev->d_rps0.cpu_addr, 0x0, SAA7146_RPS_MEM);

    dev->d_rps1.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
                           &dev->d_rps1.dma_handle);
    if (!dev->d_rps1.cpu_addr)
        goto err_free_rps0;
    memset(dev->d_rps1.cpu_addr, 0x0, SAA7146_RPS_MEM);

    dev->d_i2c.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
                          &dev->d_i2c.dma_handle);
    if (!dev->d_i2c.cpu_addr)
        goto err_free_rps1;
    memset(dev->d_i2c.cpu_addr, 0x0, SAA7146_RPS_MEM);

    /* the rest + print status message */

    /* create a nice device name */
    sprintf(dev->name, "saa7146 (%d)", saa7146_num);

    INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
    dev->ext = ext;

    mutex_init(&dev->lock);
    spin_lock_init(&dev->int_slock);
    spin_lock_init(&dev->slock);

    mutex_init(&dev->i2c_lock);

    dev->module = THIS_MODULE;
    init_waitqueue_head(&dev->i2c_wq);

    /* set some sane pci arbitration values */
    saa7146_write(dev, PCI_BT_V1, 0x1c00101f);

    /* TODO: use the status code of the callback */

    err = -ENODEV;

    if (ext->probe && ext->probe(dev)) {
        DEB_D(("ext->probe() failed for %p. skipping device.\n",dev));
        goto err_free_i2c;
    }

    if (ext->attach(dev, pci_ext)) {
        DEB_D(("ext->attach() failed for %p. skipping device.\n",dev));
        goto err_free_i2c;
    }
    /* V4L extensions will set the pci drvdata to the v4l2_device in the
       attach() above. So for those cards that do not use V4L we have to
       set it explicitly. */
    pci_set_drvdata(pci, &dev->v4l2_dev);

    INIT_LIST_HEAD(&dev->item);
    list_add_tail(&dev->item,&saa7146_devices);
    saa7146_num++;

    err = 0;
out:
    return err;

err_free_i2c:
    pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_i2c.cpu_addr,
                        dev->d_i2c.dma_handle);
err_free_rps1:
    pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_rps1.cpu_addr,
                        dev->d_rps1.dma_handle);
err_free_rps0:
    pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_rps0.cpu_addr,
                        dev->d_rps0.dma_handle);
err_free_irq:
    free_irq(pci->irq, (void *)dev);
err_unmap:
    iounmap(dev->mem);
err_release:
    pci_release_region(pci, 0);
err_disable:
    pci_disable_device(pci);
err_free:
    kfree(dev);
    goto out;
}
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct wil6210_priv *wil;
    struct device *dev = &pdev->dev;
    void __iomem *csr;
    int rc;

    /* check HW */
    dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
             (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

    if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
        dev_err(&pdev->dev, "Not " WIL_NAME "? "
                "BAR0 size is %lu while expecting %lu\n",
                (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
        return -ENODEV;
    }

    rc = pci_enable_device(pdev);
    if (rc) {
        dev_err(&pdev->dev, "pci_enable_device failed\n");
        return -ENODEV;
    }
    /* rollback to err_disable_pdev */

    rc = pci_request_region(pdev, 0, WIL_NAME);
    if (rc) {
        dev_err(&pdev->dev, "pci_request_region failed\n");
        goto err_disable_pdev;
    }
    /* rollback to err_release_reg */

    csr = pci_ioremap_bar(pdev, 0);
    if (!csr) {
        dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
        rc = -ENODEV;
        goto err_release_reg;
    }
    /* rollback to err_iounmap */
    dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);

    wil = wil_if_alloc(dev, csr);
    if (IS_ERR(wil)) {
        rc = (int)PTR_ERR(wil);
        dev_err(dev, "wil_if_alloc failed: %d\n", rc);
        goto err_iounmap;
    }
    /* rollback to if_free */

    pci_set_drvdata(pdev, wil);
    wil->pdev = pdev;

    wil6210_clear_irq(wil);
    /* FW should raise IRQ when ready */
    rc = wil_if_pcie_enable(wil);
    if (rc) {
        wil_err(wil, "Enable device failed\n");
        goto if_free;
    }
    /* rollback to bus_disable */

    rc = wil_if_add(wil);
    if (rc) {
        wil_err(wil, "wil_if_add failed: %d\n", rc);
        goto bus_disable;
    }

    wil6210_debugfs_init(wil);

    /* check FW is alive */
    wmi_echo(wil);

    return 0;

bus_disable:
    wil_if_pcie_disable(wil);
if_free:
    wil_if_free(wil);
err_iounmap:
    pci_iounmap(pdev, csr);
err_release_reg:
    pci_release_region(pdev, 0);
err_disable_pdev:
    pci_disable_device(pdev);

    return rc;
}