Example #1
/*
 * Do all the common PCIe setup and initialization.
 * devdata is not yet allocated, and is not allocated until after this
 * routine returns success.  Therefore qib_dev_err() can't be used for error
 * printing.
 */
int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		/*
		 * This can happen (in theory) iff:
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
			      -ret);
		goto done;
	}

	ret = pci_request_regions(pdev, QIB_DRV_NAME);
	if (ret) {
		qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
		goto bail;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		/*
		 * If the 64 bit setup fails, try 32 bit.  Some systems
		 * do not setup 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
			goto bail;
		}
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		qib_early_err(&pdev->dev,
			      "Unable to set DMA consistent mask: %d\n", ret);
		goto bail;
	}

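	/* Enable bus mastering; failure to enable AER reporting below is non-fatal. */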
	pci_set_master(pdev);
	ret = pci_enable_pcie_error_reporting(pdev);
	if (ret) {
		qib_early_err(&pdev->dev,
			      "Unable to enable pcie error reporting: %d\n",
			      ret);
		ret = 0;
	}
	goto done;

bail:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
done:
	return ret;
}
Example #2
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	if (!mei_me_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

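	/* Prefer 64-bit DMA and fall back to a 32-bit mask if it is rejected. */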
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
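	/* Try MSI; the IRQ registration below adapts to whether MSI came up. */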
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = mei_register(dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
Example #3
static int cs5530_init_chip(void)
{
	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;

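	/* Scan every Cyrix function, taking a reference to the two we need. */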
	while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
		switch (dev->device) {
			case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
				master_0 = pci_dev_get(dev);
				break;
			case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
				cs5530_0 = pci_dev_get(dev);
				break;
		}
	}
	if (!master_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
		goto fail_put;
	}
	if (!cs5530_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
		goto fail_put;
	}

	pci_set_master(cs5530_0);
	pci_try_set_mwi(cs5530_0);

	/*
	 * Set PCI CacheLineSize to 16-bytes:
	 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
	 *
	 * Note: This value is constant because the 5530 is only a Geode companion
	 */

	pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);

	/*
	 * Disable trapping of UDMA register accesses (Win98 hack):
	 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
	 */

	pci_write_config_word(cs5530_0, 0xd0, 0x5006);

	/*
	 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
	 * The other settings are what is necessary to get the register
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x40, 0x1e);

	/*
	 * Set max PCI burst size (16-bytes seems to work best):
	 *	   16bytes: set bit-1 at 0x41 (reg value of 0x16)
	 *	all others: clear bit-1 at 0x41, and do:
	 *	  128bytes: OR 0x00 at 0x41
	 *	  256bytes: OR 0x04 at 0x41
	 *	  512bytes: OR 0x08 at 0x41
	 *	 1024bytes: OR 0x0c at 0x41
	 */

	pci_write_config_byte(master_0, 0x41, 0x14);

	/*
	 * These settings are necessary to get the chip
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x42, 0x00);
	pci_write_config_byte(master_0, 0x43, 0xc1);

	pci_dev_put(master_0);
	pci_dev_put(cs5530_0);
	return 0;
fail_put:
	if (master_0)
		pci_dev_put(master_0);
	if (cs5530_0)
		pci_dev_put(cs5530_0);
	return -ENODEV;
}
Example #4
// chip-specific constructor
// (see "Management of Cards and Components")
static int __devinit
snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
{
	vortex_t *chip;
	int err;
	static struct snd_device_ops ops = {
		.dev_free = snd_vortex_dev_free,
	};

	*rchip = NULL;

	// check PCI availability (DMA).
	if ((err = pci_enable_device(pci)) < 0)
		return err;
	if (pci_set_dma_mask(pci, DMA_32BIT_MASK) < 0 ||
	    pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK) < 0) {
		printk(KERN_ERR "error setting DMA mask\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

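	// Allocate the chip-private structure; freed on the error paths below.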
	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	// initialize the stuff
	chip->pci_dev = pci;
	chip->io = pci_resource_start(pci, 0);
	chip->vendor = pci->vendor;
	chip->device = pci->device;
	chip->card = card;
	chip->irq = -1;

	// (1) PCI resource allocation
	// Get MMIO area
	//
	if ((err = pci_request_regions(pci, CARD_NAME_SHORT)) != 0)
		goto regions_out;

	chip->mmio = ioremap_nocache(pci_resource_start(pci, 0),
	                             pci_resource_len(pci, 0));
	if (!chip->mmio) {
		printk(KERN_ERR "MMIO area remap failed.\n");
		err = -ENOMEM;
		goto ioremap_out;
	}

	/* Init audio core.
	 * This must be done before we do request_irq otherwise we can get spurious
	 * interrupts that we do not handle properly and make a mess of things */
	if ((err = vortex_core_init(chip)) != 0) {
		printk(KERN_ERR "hw core init failed\n");
		goto core_out;
	}

	if ((err = request_irq(pci->irq, vortex_interrupt,
	                       IRQF_SHARED, CARD_NAME_SHORT,
	                       chip)) != 0) {
		printk(KERN_ERR "cannot grab irq\n");
		goto irq_out;
	}
	chip->irq = pci->irq;

	pci_set_master(pci);
	// End of PCI setup.

	// Register alsa root device.
	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		goto alloc_out;
	}

	snd_card_set_dev(card, &pci->dev);

	*rchip = chip;

	return 0;

alloc_out:
	free_irq(chip->irq, chip);
irq_out:
	vortex_core_shutdown(chip);
core_out:
	iounmap(chip->mmio);
ioremap_out:
	pci_release_regions(chip->pci_dev);
regions_out:
	pci_disable_device(chip->pci_dev);
	//FIXME: this not the right place to unregister the gameport
	vortex_gameport_unregister(chip);
	kfree(chip);
	return err;
}
Example #5
static int dabpci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
    int result;
    printk(KERN_INFO "init DAB PCIe.\n");
    printk(KERN_INFO "vendor: 0x%x, device: 0x%x.\n", dev->vendor, dev->device);

    /* allocate the private device structure */
    DABDrv = kmalloc(sizeof(struct dab_dev), GFP_KERNEL);

    if(unlikely(!DABDrv))
        return -ENOMEM;
    
    DABDrv->pci_dev = dev;

    /* enable the PCI device */
    result = pci_enable_device(dev);
    if (unlikely(result))
        goto free_priv;
    /* enable bus mastering and set the DMA mask */
    pci_set_master(dev);
    result = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
    if (unlikely(result))
    {
        printk(KERN_ERR "DABPCI: 32-bit PCI DMA addressing not supported!\n");
        goto disable_pci;
    }
    printk(KERN_INFO "DABPCI: DMA mask set successfully!\n");

    /*request I/O resource */

    DABDrv->mmio_start = pci_resource_start(dev,0);
    DABDrv->mmio_end = pci_resource_end(dev,0);
    DABDrv->mmio_flags = pci_resource_flags(dev,0);
    DABDrv->mmio_len = pci_resource_len(dev,0);

    if (unlikely(!(DABDrv->mmio_flags & IORESOURCE_MEM)))
    {
        printk(KERN_ERR "DABPCI: BAR 0 is not a memory resource!\n");
        result = -ENODEV;
        goto disable_pci;
    }


    result = pci_request_regions(dev, DAB_NAME);
    if(unlikely(result))
    {
        printk(KERN_ERR "DABPCI: Failed to request IO space!\n");
        goto disable_pci;
    }
    
    DABDrv->mmio_addr = ioremap(DABDrv->mmio_start, DABDrv->mmio_len);
    if (unlikely(!DABDrv->mmio_addr))
    {
        result = -EIO;
        printk(KERN_ERR "DABPCI: Failed to do IO remap!\n");
        goto release_regions;
    }

    /*enable MSI*/
    result = pci_enable_msi(dev);
    if (unlikely(result))
    {
        printk(KERN_ERR "DABPCI: Failed to enable MSI!\n");
        goto unmap_mmio;
    }

    DABDrv->m_irq = dev->irq;
    result = request_irq(DABDrv->m_irq, DABDrv_interrupt, IRQF_DISABLED, DAB_NAME, NULL);
    if (unlikely(result))
    {
        printk(KERN_ERR "DABPCI: Failed to request interrupt.\n");
        goto disable_msi;
    }

    dabpci_major = register_chrdev(0, DAB_CLASS_NAME, &DABDrv_fops);
    if (dabpci_major < 0)
    {
        printk(KERN_ERR "DABPCI: Failed to get a major device number: %d\n", dabpci_major);
        result = dabpci_major;
        goto release_irq;
    }
    printk(KERN_INFO "DABPCI: Got major device number %d successfully\n", dabpci_major);

    return 0;

    release_irq:
        free_irq(DABDrv->m_irq, NULL);
    disable_msi:
        pci_disable_msi(dev);
    unmap_mmio:
        iounmap(DABDrv->mmio_addr);
    release_regions:
        pci_release_regions(dev);
    disable_pci:
        pci_disable_device(dev);
    free_priv:
        kfree(DABDrv);
        return result;
}
Example #6
static int __devinit fnic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to alloc shared tag map\n");
		goto err_out_free_hba;
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fall back to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 40-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed\n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FNIC_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}

	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

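	/*
	 * Pre-populated mempools (minimum two elements) keep I/O requests
	 * and SG lists available under memory pressure.
	 */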
	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	fcoe_ctlr_init(&fnic->ctlr);
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fnic->ctlr.mode = FIP_ST_NON_FIP;
	}
	fnic->state = FNIC_IN_FC_MODE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;
	lp->tt = fnic_transport_template;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fc_lport_init(lp);
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	scsi_host_put(lp->host);
err_out:
	return err;
}
Example #7
int mite_setup2(struct mite_struct *mite, unsigned use_iodwbsr_1)
{
	unsigned long length;
	resource_size_t addr;
	int i;
	u32 csigr_bits;
	unsigned unknown_dma_burst_bits;

	if (comedi_pci_enable(mite->pcidev, "mite")) {
		printk(KERN_ERR "error enabling mite and requesting io regions\n");
		return -EIO;
	}
	pci_set_master(mite->pcidev);

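	/* BAR 0 holds the MITE registers; BAR 1 is the DAQ register window. */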
	addr = pci_resource_start(mite->pcidev, 0);
	mite->mite_phys_addr = addr;
	mite->mite_io_addr = ioremap(addr, PCI_MITE_SIZE);
	if (!mite->mite_io_addr) {
		printk(KERN_ERR "Failed to remap mite io memory address\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "MITE:0x%08llx mapped to %p\n",
	       (unsigned long long)mite->mite_phys_addr, mite->mite_io_addr);

	addr = pci_resource_start(mite->pcidev, 1);
	mite->daq_phys_addr = addr;
	length = pci_resource_len(mite->pcidev, 1);
	mite->daq_io_addr = ioremap(mite->daq_phys_addr, length);
	if (!mite->daq_io_addr) {
		printk(KERN_ERR "Failed to remap daq io memory address\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "DAQ:0x%08llx mapped to %p\n",
	       (unsigned long long)mite->daq_phys_addr, mite->daq_io_addr);

	if (use_iodwbsr_1) {
		writel(0, mite->mite_io_addr + MITE_IODWBSR);
		printk(KERN_INFO "mite: using I/O Window Base Size register 1\n");
		writel(mite->daq_phys_addr | WENAB |
		       MITE_IODWBSR_1_WSIZE_bits(length),
		       mite->mite_io_addr + MITE_IODWBSR_1);
		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
	} else {
		writel(mite->daq_phys_addr | WENAB,
		       mite->mite_io_addr + MITE_IODWBSR);
	}
	unknown_dma_burst_bits =
	    readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
	writel(unknown_dma_burst_bits,
	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);

	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
	mite->num_channels = mite_csigr_dmac(csigr_bits);
	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
		printk(KERN_WARNING "mite: bug? chip claims to have %i dma "
		       "channels. Setting to %i.\n",
		       mite->num_channels, MAX_MITE_DMA_CHANNELS);
		mite->num_channels = MAX_MITE_DMA_CHANNELS;
	}
	dump_chip_signature(csigr_bits);
	for (i = 0; i < mite->num_channels; i++) {
		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(i));
	}
	mite->fifo_size = mite_fifo_size(mite, 0);
	printk(KERN_INFO "mite: fifo size is %i.\n", mite->fifo_size);
	mite->used = 1;

	return 0;
}
Example #8
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

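	/* Negotiate the SVGA device ID; only SVGA_ID_2 hardware is supported here. */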
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
Example #9
static int xilly_probe(struct pci_dev *pdev,
		       const struct pci_device_id *ent)
{
	struct xilly_endpoint *endpoint;
	int rc;

	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);

	if (!endpoint)
		return -ENOMEM;

	pci_set_drvdata(pdev, endpoint);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_enable_device() failed. Aborting.\n");
		return rc;
	}

	/* L0s has caused packet drops. No power saving, thank you. */

	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(endpoint->dev,
			"Incorrect BAR configuration. Aborting.\n");
		return -ENODEV;
	}

	rc = pcim_iomap_regions(pdev, 0x01, xillyname);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_iomap_regions() failed. Aborting.\n");
		return rc;
	}

	endpoint->registers = pcim_iomap_table(pdev)[0];

	pci_set_master(pdev);

	/* Set up a single MSI interrupt */
	if (pci_enable_msi(pdev)) {
		dev_err(endpoint->dev,
			"Failed to enable MSI interrupts. Aborting.\n");
		return -ENODEV;
	}
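	/* devm-managed IRQ: released automatically when the driver detaches. */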
	rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
			      xillyname, endpoint);
	if (rc) {
		dev_err(endpoint->dev,
			"Failed to register MSI handler. Aborting.\n");
		return -ENODEV;
	}

	/*
	 * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
	 * even when the PCIe driver claims that a 64-bit mask is OK. On the
	 * other hand, on some architectures, 64-bit addressing is mandatory.
	 * So go for the 64-bit mask only when failing is the other option.
	 */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		endpoint->dma_using_dac = 0;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		endpoint->dma_using_dac = 1;
	} else {
		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
		return -ENODEV;
	}

	return xillybus_endpoint_discovery(endpoint);
}
Example #10
static int
ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	char		 buf[80];
	const uint64_t	 mask_39bit = 0x7FFFFFFFFFULL;
	struct		 ahc_softc *ahc;
	ahc_dev_softc_t	 pci;
	const struct ahc_pci_identity *entry;
	char		*name;
	int		 error;
	struct device	*dev = &pdev->dev;

	pci = pdev;
	entry = ahc_find_pci_device(pci);
	if (entry == NULL)
		return (-ENODEV);

	/*
	 * Allocate a softc for this card and
	 * set it up for attachment by our
	 * common detect routine.
	 */
	sprintf(buf, "ahc_pci:%d:%d:%d",
		ahc_get_pci_bus(pci),
		ahc_get_pci_slot(pci),
		ahc_get_pci_function(pci));
	name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
	if (name == NULL)
		return (-ENOMEM);
	strcpy(name, buf);
	ahc = ahc_alloc(NULL, name);
	if (ahc == NULL)
		return (-ENOMEM);
	if (pci_enable_device(pdev)) {
		ahc_free(ahc);
		return (-ENODEV);
	}
	pci_set_master(pdev);

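	/* Use 39-bit DMA only for large-SCB chips on platforms that need more than 32 bits. */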
	if (sizeof(dma_addr_t) > 4
	    && ahc->features & AHC_LARGE_SCBS
	    && dma_set_mask(dev, mask_39bit) == 0
	    && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
		ahc->flags |= AHC_39BIT_ADDRESSING;
	} else {
		if (dma_set_mask(dev, DMA_32BIT_MASK)) {
			ahc_free(ahc);
			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
			return (-ENODEV);
		}
	}
	ahc->dev_softc = pci;
	error = ahc_pci_config(ahc, entry);
	if (error != 0) {
		ahc_free(ahc);
		return (-error);
	}

	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
		ahc_linux_pci_inherit_flags(ahc);

	pci_set_drvdata(pdev, ahc);
	ahc_linux_register_host(ahc, &aic7xxx_driver_template);
	return (0);
}
Example #11
static int xilly_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct xilly_endpoint *endpoint;
	int rc = 0;

	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);

	if (!endpoint)
		return -ENOMEM;

	pci_set_drvdata(pdev, endpoint);

	rc = pci_enable_device(pdev);

	/* L0s has caused packet drops. No power saving, thank you. */

	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	if (rc) {
		dev_err(endpoint->dev,
			"pci_enable_device() failed. Aborting.\n");
		goto no_enable;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(endpoint->dev,
			"Incorrect BAR configuration. Aborting.\n");
		rc = -ENODEV;
		goto bad_bar;
	}

	rc = pci_request_regions(pdev, xillyname);
	if (rc) {
		dev_err(endpoint->dev,
			"pci_request_regions() failed. Aborting.\n");
		goto failed_request_regions;
	}

	endpoint->registers = pci_iomap(pdev, 0, 128);
	if (!endpoint->registers) {
		dev_err(endpoint->dev, "Failed to map BAR 0. Aborting.\n");
		rc = -EIO;
		goto failed_iomap0;
	}

	pci_set_master(pdev);

	/* Set up a single MSI interrupt */
	if (pci_enable_msi(pdev)) {
		dev_err(endpoint->dev,
			"Failed to enable MSI interrupts. Aborting.\n");
		rc = -ENODEV;
		goto failed_enable_msi;
	}
	rc = request_irq(pdev->irq, xillybus_isr, 0, xillyname, endpoint);

	if (rc) {
		dev_err(endpoint->dev,
			"Failed to register MSI handler. Aborting.\n");
		rc = -ENODEV;
		goto failed_register_msi;
	}

	/*
	 * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
	 * is the right thing. But some unclever PCIe drivers report it's OK
	 * when the hardware drops those 64-bit PCIe packets. So trust
	 * nobody and use 32 bits DMA addressing in any case.
	 */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		endpoint->dma_using_dac = 0;
	else {
		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
		rc = -ENODEV;
		goto failed_dmamask;
	}

	rc = xillybus_endpoint_discovery(endpoint);

	if (!rc)
		return 0;

failed_dmamask:
	free_irq(pdev->irq, endpoint);
failed_register_msi:
	pci_disable_msi(pdev);
failed_enable_msi:
	/* pci_clear_master(pdev); Nobody else seems to do this */
	pci_iounmap(pdev, endpoint->registers);
failed_iomap0:
	pci_release_regions(pdev);
failed_request_regions:
bad_bar:
	pci_disable_device(pdev);
no_enable:
	xillybus_do_cleanup(&endpoint->cleanup, endpoint);

	kfree(endpoint);
	return rc;
}
Example #12
/**
 * heci_ish_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in heci_ish_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int heci_ish_probe(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct heci_device *dev;
	struct heci_ish_hw *hw;
	int err;

#if defined(SUPPORT_A0_ONLY)
	pdev->revision = REVISION_ID_CHT_A0;
#elif defined(SUPPORT_B0_ONLY)
	pdev->revision = REVISION_ID_CHT_B0;
#endif
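	/* Only one ISH device is supported; heci_mutex guards heci_pci_device. */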
	mutex_lock(&heci_mutex);
	if (heci_pci_device) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "heci: Failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for heci driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "heci: Failed to get pci regions.\n");
		goto disable_device;
	}

	/* allocates and initializes the heci dev structure */
	dev = heci_ish_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_ish_hw(dev);

	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}

	/* clear spurious interrupts */
	heci_clear_interrupts(dev);
	dev_dbg(&pdev->dev, "heci: after heci_clear_interrupts\n");

	heci_pci_device = pdev;

	/* request and enable interrupt   */
#ifndef TIMER_POLLING
	err = request_threaded_irq(pdev->irq,
			heci_ish_irq_quick_handler,
			heci_ish_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "heci: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto free_device;
	}
	dev_dbg(&pdev->dev, "heci: after request_threaded_irq\n");
#else
	/* Init & prepare workqueue */
	INIT_WORK(&ish_poll_work, ish_poll_work_fn);

	/* Create and schedule ISH polling timer */
	init_timer(&ish_poll_timer);
	ish_poll_timer.data = 0;
	ish_poll_timer.function = ish_poll_timer_fn;
	ish_poll_timer.expires = jiffies + 2;
	timer_data = dev;
	add_timer(&ish_poll_timer);

	/* Init ISH polling timers workqueue */
#endif

	/* PCI quirk: prevent from being put into D3 state */
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;

#if 0
	/* TEST: in order to test reverse (FW-initiated) reset flow,
	 * set "host ready" here and wait until FW starts its reset
	 */
	dev->recvd_hw_ready = 0;
	heci_ish_set_host_rdy(dev);
#endif

#ifdef	D3_RCR
	/* After that we can enable ISH DMA operation */
	writel(IPC_RMP2_DMA_ENABLED, hw->mem_addr + IPC_REG_ISH_RMP2);

	/* Send 0 IPC message so that ISH FW wakes up if it was already asleep */
	writel(IPC_DRBL_BUSY_BIT, hw->mem_addr + IPC_REG_HOST2ISH_DRBL);
#endif

	if (heci_start(dev)) {
		dev_err(&pdev->dev, "heci: Init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}
	dev_dbg(&pdev->dev, "heci: after heci_start\n");

	err = heci_register(dev);
	if (err)
		goto release_irq;
	dev_dbg(&pdev->dev, "heci: after heci_register\n");

	pci_set_drvdata(pdev, dev);
	dev_dbg(&pdev->dev, "heci: after pci_set_drvdata\n");

	mutex_unlock(&heci_mutex);

	return 0;

release_irq:
	/* disable interrupts */
	heci_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

free_device:
	pci_iounmap(pdev, hw->mem_addr);
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&heci_mutex);
	dev_err(&pdev->dev, "heci: Driver initialization failed.\n");
	return err;
}
Example #13
static int c_can_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
	struct c_can_priv *priv;
	struct net_device *dev;
	void __iomem *addr;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_device FAILED\n");
		goto out;
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev, "pci_request_regions FAILED\n");
		goto out_disable_device;
	}

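	/* Enable bus mastering; MSI is best-effort here and pdev->irq is valid either way. */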
	pci_set_master(pdev);
	pci_enable_msi(pdev);

	addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!addr) {
		dev_err(&pdev->dev,
			"device has no PCI memory resources, "
			"failing adapter\n");
		ret = -ENOMEM;
		goto out_release_regions;
	}

	/* allocate the c_can device */
	dev = alloc_c_can_dev();
	if (!dev) {
		ret = -ENOMEM;
		goto out_iounmap;
	}

	priv = netdev_priv(dev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;
	priv->base = addr;

	if (!c_can_pci_data->freq) {
		dev_err(&pdev->dev, "no clock frequency defined\n");
		ret = -ENODEV;
		goto out_free_c_can;
	} else {
		priv->can.clock.freq = c_can_pci_data->freq;
	}

	/* Configure CAN type */
	switch (c_can_pci_data->type) {
	case BOSCH_C_CAN:
		priv->regs = reg_map_c_can;
		break;
	case BOSCH_D_CAN:
		priv->regs = reg_map_d_can;
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}

	/* Configure access to registers */
	switch (c_can_pci_data->reg_align) {
	case C_CAN_REG_ALIGN_32:
		priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
		break;
	case C_CAN_REG_ALIGN_16:
		priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
		priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
		break;
	default:
		ret = -EINVAL;
		goto out_free_c_can;
	}

	ret = register_c_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto out_free_c_can;
	}

	dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
		 KBUILD_MODNAME, priv->regs, dev->irq);

	return 0;

out_free_c_can:
	pci_set_drvdata(pdev, NULL);
	free_c_can_dev(dev);
out_iounmap:
	pci_iounmap(pdev, addr);
out_release_regions:
	pci_disable_msi(pdev);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
Example #14
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ath_softc *sc;
	struct ieee80211_hw *hw;
	u8 csz;
	u32 val;
	int ret = 0;
	char hw_name[64];

	if (pcim_enable_device(pdev))
		return -EIO;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		pr_err("32-bit DMA not available\n");
		return ret;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		pr_err("32-bit DMA consistent DMA enable failed\n");
		return ret;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES / sizeof(u32);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

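	/* Reserve and map BAR 0 through the managed devres API; cleanup is automatic. */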
	ret = pcim_iomap_regions(pdev, BIT(0), "ath9k");
	if (ret) {
		dev_err(&pdev->dev, "PCI memory region reserve error\n");
		return -ENODEV;
	}

	ath9k_fill_chanctx_ops();
	hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
	if (!hw) {
		dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
		return -ENOMEM;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	sc = hw->priv;
	sc->hw = hw;
	sc->dev = &pdev->dev;
	sc->mem = pcim_iomap_table(pdev)[0];
	sc->driver_data = id->driver_data;

	ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_irq;
	}

	sc->irq = pdev->irq;

	ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize device\n");
		goto err_init;
	}

	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
		   hw_name, (unsigned long)sc->mem, pdev->irq);

	return 0;

err_init:
	free_irq(sc->irq, sc);
err_irq:
	ieee80211_free_hw(hw);
	return ret;
}
Example #15
static int cmodio_pci_probe(struct pci_dev *dev,
				      const struct pci_device_id *id)
{
	struct cmodio_device *priv;
	int ret;

	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&dev->dev, "unable to allocate private data\n");
		return -ENOMEM;
	}

	pci_set_drvdata(dev, priv);
	priv->pdev = dev;

	/* Hardware Initialization */
	ret = pci_enable_device(dev);
	if (ret) {
		dev_err(&dev->dev, "unable to enable device\n");
		return ret;
	}

	pci_set_master(dev);
	ret = pci_request_regions(dev, DRV_NAME);
	if (ret) {
		dev_err(&dev->dev, "unable to request regions\n");
		goto out_pci_disable_device;
	}

	/* Onboard configuration registers */
	priv->ctrl = pci_ioremap_bar(dev, 4);
	if (!priv->ctrl) {
		dev_err(&dev->dev, "unable to remap onboard regs\n");
		ret = -ENOMEM;
		goto out_pci_release_regions;
	}

	/* Read the hex switch on the carrier board */
	priv->hex = ioread8(&priv->ctrl->int_enable);

	/* Add the MODULbus number (hex switch value) to the device's sysfs */
	ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
	if (ret) {
		dev_err(&dev->dev, "unable to create sysfs attributes\n");
		goto out_unmap_ctrl;
	}

	/*
	 * Disable all interrupt lines, each submodule will enable its
	 * own interrupt line if needed
	 */
	iowrite8(0xf, &priv->ctrl->int_disable);

	/* Register drivers for all submodules */
	ret = cmodio_probe_submodules(priv);
	if (ret) {
		dev_err(&dev->dev, "unable to probe submodules\n");
		goto out_sysfs_remove_group;
	}

	return 0;

out_sysfs_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
out_unmap_ctrl:
	iounmap(priv->ctrl);
out_pci_release_regions:
	pci_release_regions(dev);
out_pci_disable_device:
	pci_disable_device(dev);

	return ret;
}
Example #16
static int isp1761_pci_probe(struct pci_dev *dev,
		const struct pci_device_id *id)
{
	u8 latency, limit;
	__u32 reg_data;
	int retry_count;
	struct usb_hcd *hcd;
	unsigned int devflags = 0;
	int ret_status = 0;

	resource_size_t pci_mem_phy0;
	resource_size_t memlength;

	u8 __iomem *chip_addr;
	u8 __iomem *iobase;
	resource_size_t nxp_pci_io_base;
	resource_size_t iolength;

	if (usb_disabled())
		return -ENODEV;

	if (pci_enable_device(dev) < 0)
		return -ENODEV;

	if (!dev->irq)
		return -ENODEV;

	/* Grab the PLX PCI mem maped port start address we need  */
	nxp_pci_io_base = pci_resource_start(dev, 0);
	iolength = pci_resource_len(dev, 0);

	if (!request_mem_region(nxp_pci_io_base, iolength, "ISP1761 IO MEM")) {
		printk(KERN_ERR "request region #1\n");
		return -EBUSY;
	}

	iobase = ioremap_nocache(nxp_pci_io_base, iolength);
	if (!iobase) {
		printk(KERN_ERR "ioremap #1\n");
		ret_status = -ENOMEM;
		goto cleanup1;
	}
	/* Grab the PLX PCI shared memory of the ISP 1761 we need  */
	pci_mem_phy0 = pci_resource_start(dev, 3);
	memlength = pci_resource_len(dev, 3);
	if (memlength < 0xffff) {
		printk(KERN_ERR "memory length for this resource is wrong\n");
		ret_status = -ENOMEM;
		goto cleanup2;
	}

	if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
		printk(KERN_ERR "host controller already in use\n");
		ret_status = -EBUSY;
		goto cleanup2;
	}

	/* map available memory */
	chip_addr = ioremap_nocache(pci_mem_phy0, memlength);
	if (!chip_addr) {
		printk(KERN_ERR "Error ioremap failed\n");
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	/* bad pci latencies can contribute to overruns */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
	if (latency) {
		pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
		if (limit && limit < latency)
			pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
	}

	/* Try to check whether we can access Scratch Register of
	 * Host Controller or not. The initial PCI access is retried until
	 * local init for the PCI bridge is completed
	 */
	retry_count = 20;
	reg_data = 0;
	while ((reg_data != 0xFACE) && retry_count) {
		/*by default host is in 16bit mode, so
		 * io operations at this stage must be 16 bit
		 * */
		writel(0xface, chip_addr + HC_SCRATCH_REG);
		udelay(100);
		reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
		retry_count--;
	}

	iounmap(chip_addr);

	/* Host Controller presence is detected by writing to scratch register
	 * and reading back and checking the contents are same or not
	 */
	if (reg_data != 0xFACE) {
		dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	pci_set_master(dev);

	/* configure PLX PCI chip to pass interrupts */
#define PLX_INT_CSR_REG 0x68
	reg_data = readl(iobase + PLX_INT_CSR_REG);
	reg_data |= 0x900;
	writel(reg_data, iobase + PLX_INT_CSR_REG);

	dev->dev.dma_mask = NULL;
	hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
		IRQF_SHARED, -ENOENT, &dev->dev, dev_name(&dev->dev),
		devflags);
	if (IS_ERR(hcd)) {
		ret_status = -ENODEV;
		goto cleanup3;
	}

	/* done with PLX IO access */
	iounmap(iobase);
	release_mem_region(nxp_pci_io_base, iolength);

	pci_set_drvdata(dev, hcd);
	return 0;

cleanup3:
	release_mem_region(pci_mem_phy0, memlength);
cleanup2:
	iounmap(iobase);
cleanup1:
	release_mem_region(nxp_pci_io_base, iolength);
	return ret_status;
}
Example #17
int
prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *ndev;
	u8 latency_tmr;
	u32 mem_addr;
	islpci_private *priv;
	int rvalue;

	/* Enable the pci device */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME);
		return -ENODEV;
	}

	/* check whether the latency timer is set correctly */
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr);
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr);
#endif
	if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) {
		/* set the latency timer */
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
				      PCIDEVICE_LATENCY_TIMER_VAL);
	}

	/* enable PCI DMA */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR "%s: 32-bit PCI DMA not supported\n", DRV_NAME);
		goto do_pci_disable_device;
	}

	/* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT)
	 * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT)
	 * 	The RETRY_TIMEOUT is used to set the number of retries that the core, as a
	 * 	Master, will perform before abandoning a cycle. The default value for
	 * 	RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new
	 * 	devices. A write of zero to the RETRY_TIMEOUT register disables this
	 * 	function to allow use with any non-compliant legacy devices that may
	 * 	execute more retries.
	 *
	 * 	Writing zero to both these two registers will disable both timeouts and
	 * 	*can* solve problems caused by devices that are slow to respond.
	 *	Make this configurable - MSW
	 */
	if ( init_pcitm >= 0 ) {
		pci_write_config_byte(pdev, 0x40, (u8)init_pcitm);
		pci_write_config_byte(pdev, 0x41, (u8)init_pcitm);
	} else {
		printk(KERN_INFO "PCI TRDY/RETRY unchanged\n");
	}

	/* request the pci device I/O regions */
	rvalue = pci_request_regions(pdev, DRV_NAME);
	if (rvalue) {
		printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n",
		       DRV_NAME, rvalue);
		goto do_pci_disable_device;
	}

	/* check if the memory window is indeed set */
	rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr);
	if (rvalue || !mem_addr) {
		printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n",
		       DRV_NAME);
		goto do_pci_release_regions;
	}

	/* enable PCI bus-mastering */
	DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME);
	pci_set_master(pdev);

	/* enable MWI */
	pci_set_mwi(pdev);

	/* setup the network device interface and its structure */
	if (!(ndev = islpci_setup(pdev))) {
		/* error configuring the driver as a network device */
		printk(KERN_ERR "%s: could not configure network device\n",
		       DRV_NAME);
		goto do_pci_release_regions;
	}

	priv = netdev_priv(ndev);
	islpci_set_state(priv, PRV_STATE_PREBOOT); /* we are attempting to boot */

	/* card is in unknown state yet, might have some interrupts pending */
	isl38xx_disable_interrupts(priv->device_base);

	/* request for the interrupt before uploading the firmware */
	rvalue = request_irq(pdev->irq, &islpci_interrupt,
			     SA_SHIRQ, ndev->name, priv);

	if (rvalue) {
		/* error, could not hook the handler to the irq */
		printk(KERN_ERR "%s: could not install IRQ handler\n",
		       ndev->name);
		goto do_unregister_netdev;
	}

	/* firmware upload is triggered in islpci_open */

	return 0;

      do_unregister_netdev:
	unregister_netdev(ndev);
	islpci_free_memory(priv);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
	priv = NULL;
      do_pci_release_regions:
	pci_release_regions(pdev);
      do_pci_disable_device:
	pci_disable_device(pdev);
	return -EIO;
}
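pci_set_dma_mask() and the DMA_32BIT_MASK constant used above are long gone; on current kernels the same step is a single call into the generic DMA API. A sketch of how this probe's DMA setup would look today (assumes <linux/dma-mapping.h>):

	/* Sketch: modern 32-bit DMA setup for the probe above. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME);
		goto do_pci_disable_device;
	}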
Example #18
static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int i;
	int rc;
	struct ata_probe_ent *probe_ent;
	int board_id = (int) ent->driver_data;
	const int *bar_sizes;
	int n_bars;
	int pci_dev_busy = 0;
	u8 tmp8;

	if (!printed_version++)
		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	if (board_id == vt6420) {
		pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
		if (tmp8 & SATA_2DEV) {
			printk(KERN_ERR DRV_NAME "(%s): SATA master/slave not supported (0x%x)\n",
		       	pci_name(pdev), (int) tmp8);
			rc = -EIO;
			goto err_out_regions;
		}

		bar_sizes = &svia_bar_sizes[0];
		n_bars = ARRAY_SIZE(svia_bar_sizes);
	} else {
		bar_sizes = &vt6421_bar_sizes[0];
		n_bars = ARRAY_SIZE(vt6421_bar_sizes);
	}

	/* validate each BAR against the size table for this board */
	for (i = 0; i < n_bars; i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			printk(KERN_ERR DRV_NAME "(%s): invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n",
			       pci_name(pdev), i,
			       pci_resource_start(pdev, i),
			       pci_resource_len(pdev, i));
			rc = -ENODEV;
			goto err_out_regions;
		}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (board_id == vt6420)
		probe_ent = vt6420_init_probe_ent(pdev);
	else
		probe_ent = vt6421_init_probe_ent(pdev);
	
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_regions;
	}

	svia_configure(pdev);

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}
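One way to resolve the FIXME above: libata of this era returns the number of ports attached from ata_device_add(), so zero means nothing was registered. A hedged sketch, assuming that return convention:

	/* Sketch: treat zero attached ports as probe failure. */
	if (!ata_device_add(probe_ent)) {
		kfree(probe_ent);
		rc = -ENODEV;
		goto err_out_regions;
	}
	kfree(probe_ent);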
Example #19
static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	long ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);
	dev = alloc_etherdev (sizeof (*np));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef MEM_MAPPING
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_dev;
	}
#else
	ioaddr = pci_resource_start (pdev, 0);
#endif
	dev->base_addr = ioaddr;
	dev->irq = irq;
	np = netdev_priv(dev);
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				 strcmp (media[card_idx], "6") == 0) {
				np->speed = 1000;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				 strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_iounmap;
	}
	np->tx_ring = (struct netdev_desc *) ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_unmap_tx;
	}
	np->rx_ring = (struct netdev_desc *) ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
	 	if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
		       tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
		       np->rx_coalesce, np->rx_timeout * 640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

      err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
      err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
      err_out_iounmap:
#ifdef MEM_MAPPING
	iounmap ((void *) ioaddr);

      err_out_dev:
#endif
	free_netdev (dev);

      err_out_res:
	pci_release_regions (pdev);

      err_out_disable:
	pci_disable_device (pdev);
	return err;
}
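The strcmp ladder above maps each media[] string (or its numeric alias) to a speed/duplex pair; the "auto"/"autosense"/"0" cases are handled separately. A table-driven sketch keeps the mapping in one place; the names here are ours, not the driver's:

/* Sketch: declarative replacement for the media[] strcmp chain. */
static const struct {
	const char *name, *alias;
	int speed, full_duplex;
} media_map[] = {
	{ "10mbps_hd",   "1",   10, 0 },
	{ "10mbps_fd",   "2",   10, 1 },
	{ "100mbps_hd",  "3",  100, 0 },
	{ "100mbps_fd",  "4",  100, 1 },
	{ "1000mbps_hd", "5", 1000, 0 },
	{ "1000mbps_fd", "6", 1000, 1 },
};

static int rio_parse_media(struct netdev_private *np, const char *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(media_map); i++) {
		if (!strcmp(s, media_map[i].name) ||
		    !strcmp(s, media_map[i].alias)) {
			np->an_enable = 0;
			np->speed = media_map[i].speed;
			np->full_duplex = media_map[i].full_duplex;
			return 0;
		}
	}
	return -EINVAL;	/* caller falls back to autonegotiation */
}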
Example #20
static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int result;
	struct service_processor *sp;

	if ((result = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return result;
	}
	if ((result = pci_request_regions(pdev, DRIVER_NAME))) {
		dev_err(&pdev->dev, "Failed to allocate PCI resources\n");
		goto error_resources;
	}
	/* vnc client won't work without bus-mastering */
	pci_set_master(pdev);

	sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL);
	if (sp == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		result = -ENOMEM;
		goto error_kmalloc;
	}

	spin_lock_init(&sp->lock);
	INIT_LIST_HEAD(&sp->command_queue);

	pci_set_drvdata(pdev, (void *)sp);
	sp->dev = &pdev->dev;
	sp->number = pdev->bus->number;
	snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number);
	snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number);

	if (ibmasm_event_buffer_init(sp)) {
		dev_err(sp->dev, "Failed to allocate event buffer\n");
		result = -ENOMEM;
		goto error_eventbuffer;
	}

	if (ibmasm_heartbeat_init(sp)) {
		dev_err(sp->dev, "Failed to allocate heartbeat command\n");
		result = -ENOMEM;
		goto error_heartbeat;
	}

	sp->irq = pdev->irq;
	sp->base_address = pci_ioremap_bar(pdev, 0);
	if (!sp->base_address) {
		dev_err(sp->dev, "Failed to ioremap pci memory\n");
		result = -ENODEV;
		goto error_ioremap;
	}

	result = request_irq(sp->irq, ibmasm_interrupt_handler, IRQF_SHARED, sp->devname, (void*)sp);
	if (result) {
		dev_err(sp->dev, "Failed to register interrupt handler\n");
		goto error_request_irq;
	}

	enable_sp_interrupts(sp->base_address);

	result = ibmasm_init_remote_input_dev(sp);
	if (result) {
		dev_err(sp->dev, "Failed to initialize remote queue\n");
		goto error_send_message;
	}

	result = ibmasm_send_driver_vpd(sp);
	if (result) {
		dev_err(sp->dev, "Failed to send driver VPD to service processor\n");
		goto error_send_message;
	}
	result = ibmasm_send_os_state(sp, SYSTEM_STATE_OS_UP);
	if (result) {
		dev_err(sp->dev, "Failed to send OS state to service processor\n");
		goto error_send_message;
	}
	ibmasmfs_add_sp(sp);

	ibmasm_register_uart(sp);

	return 0;

error_send_message:
	disable_sp_interrupts(sp->base_address);
	ibmasm_free_remote_input_dev(sp);
	free_irq(sp->irq, (void *)sp);
error_request_irq:
	iounmap(sp->base_address);
error_ioremap:
	ibmasm_heartbeat_exit(sp);
error_heartbeat:
	ibmasm_event_buffer_exit(sp);
error_eventbuffer:
	kfree(sp);
error_kmalloc:
	pci_release_regions(pdev);
error_resources:
	pci_disable_device(pdev);

	return result;
}
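On kernels with managed ("devres") PCI helpers, most of the unwind ladder above disappears: pcim_enable_device() arms automatic cleanup, and regions and mappings acquired afterwards are released on driver detach. A minimal sketch under that assumption (function and local names are ours):

/* Sketch: managed-resource version of the enable/request/map steps. */
static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *base;
	int err;

	err = pcim_enable_device(pdev);		/* auto-disabled on detach */
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
	if (err)				/* regions + iomap, managed */
		return err;

	base = pcim_iomap_table(pdev)[0];	/* BAR 0, ready to use */
	pci_set_master(pdev);
	return base ? 0 : -ENOMEM;
}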
Example #21
static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream,
			       struct atiixp_dma *dma, int pcm_type)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	static unsigned int rates[] = { 8000,  9600, 12000, 16000 };
	static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
		.count = ARRAY_SIZE(rates),
		.list = rates,
		.mask = 0,
	};

	if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
		return -EINVAL;

	if (dma->opened)
		return -EBUSY;
	dma->substream = substream;
	runtime->hw = snd_atiixp_pcm_hw;
	dma->ac97_pcm_type = pcm_type;
	if ((err = snd_pcm_hw_constraint_list(runtime, 0,
					      SNDRV_PCM_HW_PARAM_RATE,
					      &hw_constraints_rates)) < 0)
		return err;
	if ((err = snd_pcm_hw_constraint_integer(runtime,
						 SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
		return err;
	runtime->private_data = dma;

	/* enable DMA bits */
	spin_lock_irq(&chip->reg_lock);
	dma->ops->enable_dma(chip, 1);
	spin_unlock_irq(&chip->reg_lock);
	dma->opened = 1;

	return 0;
}
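The open callback above pins the rate to four discrete modem rates through a constraint list. When a contiguous range is acceptable, the same hook takes a min/max form instead; a hedged sketch:

	/* Sketch: range constraint instead of the discrete list above. */
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
					   8000, 16000);
	if (err < 0)
		return err;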

static int snd_atiixp_pcm_close(struct snd_pcm_substream *substream,
				struct atiixp_dma *dma)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	/* disable DMA bits */
	if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
		return -EINVAL;
	spin_lock_irq(&chip->reg_lock);
	dma->ops->enable_dma(chip, 0);
	spin_unlock_irq(&chip->reg_lock);
	dma->substream = NULL;
	dma->opened = 0;
	return 0;
}

/*
 */
static int snd_atiixp_playback_open(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	int err;

	mutex_lock(&chip->open_mutex);
	err = snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_PLAYBACK], 0);
	mutex_unlock(&chip->open_mutex);
	if (err < 0)
		return err;
	return 0;
}

static int snd_atiixp_playback_close(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	int err;
	mutex_lock(&chip->open_mutex);
	err = snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_PLAYBACK]);
	mutex_unlock(&chip->open_mutex);
	return err;
}

static int snd_atiixp_capture_open(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	return snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_CAPTURE], 1);
}

static int snd_atiixp_capture_close(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	return snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_CAPTURE]);
}


/* AC97 playback */
static struct snd_pcm_ops snd_atiixp_playback_ops = {
	.open =		snd_atiixp_playback_open,
	.close =	snd_atiixp_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_atiixp_pcm_hw_params,
	.hw_free =	snd_atiixp_pcm_hw_free,
	.prepare =	snd_atiixp_playback_prepare,
	.trigger =	snd_atiixp_pcm_trigger,
	.pointer =	snd_atiixp_pcm_pointer,
};

/* AC97 capture */
static struct snd_pcm_ops snd_atiixp_capture_ops = {
	.open =		snd_atiixp_capture_open,
	.close =	snd_atiixp_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_atiixp_pcm_hw_params,
	.hw_free =	snd_atiixp_pcm_hw_free,
	.prepare =	snd_atiixp_capture_prepare,
	.trigger =	snd_atiixp_pcm_trigger,
	.pointer =	snd_atiixp_pcm_pointer,
};

static struct atiixp_dma_ops snd_atiixp_playback_dma_ops = {
	.type = ATI_DMA_PLAYBACK,
	.llp_offset = ATI_REG_MODEM_OUT_DMA1_LINKPTR,
	.dt_cur = ATI_REG_MODEM_OUT_DMA1_DT_CUR,
	.enable_dma = atiixp_out_enable_dma,
	.enable_transfer = atiixp_out_enable_transfer,
	.flush_dma = atiixp_out_flush_dma,
};
	
static struct atiixp_dma_ops snd_atiixp_capture_dma_ops = {
	.type = ATI_DMA_CAPTURE,
	.llp_offset = ATI_REG_MODEM_IN_DMA_LINKPTR,
	.dt_cur = ATI_REG_MODEM_IN_DMA_DT_CUR,
	.enable_dma = atiixp_in_enable_dma,
	.enable_transfer = atiixp_in_enable_transfer,
	.flush_dma = atiixp_in_flush_dma,
};

static int __devinit snd_atiixp_pcm_new(struct atiixp_modem *chip)
{
	struct snd_pcm *pcm;
	int err;

	/* initialize constants */
	chip->dmas[ATI_DMA_PLAYBACK].ops = &snd_atiixp_playback_dma_ops;
	chip->dmas[ATI_DMA_CAPTURE].ops = &snd_atiixp_capture_dma_ops;

	/* PCM #0: analog I/O */
	err = snd_pcm_new(chip->card, "ATI IXP MC97", ATI_PCMDEV_ANALOG, 1, 1, &pcm);
	if (err < 0)
		return err;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_atiixp_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_atiixp_capture_ops);
	pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	pcm->private_data = chip;
	strcpy(pcm->name, "ATI IXP MC97");
	chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 128*1024);

	return 0;
}



/*
 * interrupt handler
 */
static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id)
{
	struct atiixp_modem *chip = dev_id;
	unsigned int status;

	status = atiixp_read(chip, ISR);

	if (! status)
		return IRQ_NONE;

	/* process audio DMA */
	if (status & ATI_REG_ISR_MODEM_OUT1_XRUN)
		snd_atiixp_xrun_dma(chip,  &chip->dmas[ATI_DMA_PLAYBACK]);
	else if (status & ATI_REG_ISR_MODEM_OUT1_STATUS)
		snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]);
	if (status & ATI_REG_ISR_MODEM_IN_XRUN)
		snd_atiixp_xrun_dma(chip,  &chip->dmas[ATI_DMA_CAPTURE]);
	else if (status & ATI_REG_ISR_MODEM_IN_STATUS)
		snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]);

	/* for codec detection */
	if (status & CODEC_CHECK_BITS) {
		unsigned int detected;
		detected = status & CODEC_CHECK_BITS;
		spin_lock(&chip->reg_lock);
		chip->codec_not_ready_bits |= detected;
		atiixp_update(chip, IER, detected, 0); /* disable the detected irqs */
		spin_unlock(&chip->reg_lock);
	}

	/* ack */
	atiixp_write(chip, ISR, status);

	return IRQ_HANDLED;
}
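The early return IRQ_NONE when the status register reads zero is what makes this handler safe on a shared line: other devices on the same interrupt get their turn, while acking only the bits actually seen avoids losing events. The canonical skeleton, as a sketch (names ours):

/* Sketch: canonical shape of a shared-interrupt handler. */
static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	struct atiixp_modem *chip = dev_id;
	unsigned int status = atiixp_read(chip, ISR);

	if (!status)
		return IRQ_NONE;	/* not ours: let co-sharers run */

	/* ... dispatch on the status bits ... */

	atiixp_write(chip, ISR, status);	/* ack exactly what we saw */
	return IRQ_HANDLED;
}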


/*
 * ac97 mixer section
 */

static int __devinit snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
{
	struct snd_ac97_bus *pbus;
	struct snd_ac97_template ac97;
	int i, err;
	int codec_count;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_atiixp_ac97_write,
		.read = snd_atiixp_ac97_read,
	};
	static unsigned int codec_skip[NUM_ATI_CODECS] = {
		ATI_REG_ISR_CODEC0_NOT_READY,
		ATI_REG_ISR_CODEC1_NOT_READY,
		ATI_REG_ISR_CODEC2_NOT_READY,
	};

	if (snd_atiixp_codec_detect(chip) < 0)
		return -ENXIO;

	if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0)
		return err;
	pbus->clock = clock;
	chip->ac97_bus = pbus;

	codec_count = 0;
	for (i = 0; i < NUM_ATI_CODECS; i++) {
		if (chip->codec_not_ready_bits & codec_skip[i])
			continue;
		memset(&ac97, 0, sizeof(ac97));
		ac97.private_data = chip;
		ac97.pci = chip->pci;
		ac97.num = i;
		ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE;
		if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) {
			chip->ac97[i] = NULL; /* to be sure */
			snd_printdd("atiixp-modem: codec %d not available for modem\n", i);
			continue;
		}
		codec_count++;
	}

	if (! codec_count) {
		snd_printk(KERN_ERR "atiixp-modem: no codec available\n");
		return -ENODEV;
	}

	/* snd_ac97_tune_hardware(chip->ac97, ac97_quirks); */

	return 0;
}


#ifdef CONFIG_PM
/*
 * power management
 */
static int snd_atiixp_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct atiixp_modem *chip = card->private_data;
	int i;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	for (i = 0; i < NUM_ATI_PCMDEVS; i++)
		snd_pcm_suspend_all(chip->pcmdevs[i]);
	for (i = 0; i < NUM_ATI_CODECS; i++)
		snd_ac97_suspend(chip->ac97[i]);
	snd_atiixp_aclink_down(chip);
	snd_atiixp_chip_stop(chip);

	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));
	return 0;
}

static int snd_atiixp_resume(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct atiixp_modem *chip = card->private_data;
	int i;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	if (pci_enable_device(pci) < 0) {
		printk(KERN_ERR "atiixp-modem: pci_enable_device failed, "
		       "disabling device\n");
		snd_card_disconnect(card);
		return -EIO;
	}
	pci_set_master(pci);

	snd_atiixp_aclink_reset(chip);
	snd_atiixp_chip_start(chip);

	for (i = 0; i < NUM_ATI_CODECS; i++)
		snd_ac97_resume(chip->ac97[i]);

	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif /* CONFIG_PM */


#ifdef CONFIG_PROC_FS
/*
 * proc interface for register dump
 */

static void snd_atiixp_proc_read(struct snd_info_entry *entry,
				 struct snd_info_buffer *buffer)
{
	struct atiixp_modem *chip = entry->private_data;
	int i;

	for (i = 0; i < 256; i += 4)
		snd_iprintf(buffer, "%02x: %08x\n", i, readl(chip->remap_addr + i));
}

static void __devinit snd_atiixp_proc_init(struct atiixp_modem *chip)
{
	struct snd_info_entry *entry;

	if (! snd_card_proc_new(chip->card, "atiixp-modem", &entry))
		snd_info_set_text_ops(entry, chip, snd_atiixp_proc_read);
}
#else
#define snd_atiixp_proc_init(chip)
#endif


/*
 * destructor
 */

static int snd_atiixp_free(struct atiixp_modem *chip)
{
	if (chip->irq < 0)
		goto __hw_end;
	snd_atiixp_chip_stop(chip);

      __hw_end:
	if (chip->irq >= 0)
		free_irq(chip->irq, chip);
	if (chip->remap_addr)
		iounmap(chip->remap_addr);
	pci_release_regions(chip->pci);
	pci_disable_device(chip->pci);
	kfree(chip);
	return 0;
}

static int snd_atiixp_dev_free(struct snd_device *device)
{
	struct atiixp_modem *chip = device->device_data;
	return snd_atiixp_free(chip);
}

/*
 * constructor for chip instance
 */
static int __devinit snd_atiixp_create(struct snd_card *card,
				       struct pci_dev *pci,
				       struct atiixp_modem **r_chip)
{
	static struct snd_device_ops ops = {
		.dev_free =	snd_atiixp_dev_free,
	};
	struct atiixp_modem *chip;
	int err;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	spin_lock_init(&chip->reg_lock);
	mutex_init(&chip->open_mutex);
	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;
	if ((err = pci_request_regions(pci, "ATI IXP MC97")) < 0) {
		kfree(chip);
		pci_disable_device(pci);
		return err;
	}
	chip->addr = pci_resource_start(pci, 0);
	chip->remap_addr = pci_ioremap_bar(pci, 0);
	if (chip->remap_addr == NULL) {
		snd_printk(KERN_ERR "AC'97 space ioremap problem\n");
		snd_atiixp_free(chip);
		return -EIO;
	}

	if (request_irq(pci->irq, snd_atiixp_interrupt, IRQF_SHARED,
			KBUILD_MODNAME, chip)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_atiixp_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;
	pci_set_master(pci);
	synchronize_irq(chip->irq);

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_atiixp_free(chip);
		return err;
	}

	snd_card_set_dev(card, &pci->dev);

	*r_chip = chip;
	return 0;
}


static int __devinit snd_atiixp_probe(struct pci_dev *pci,
				      const struct pci_device_id *pci_id)
{
	struct snd_card *card;
	struct atiixp_modem *chip;
	int err;

	err = snd_card_create(index, id, THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	strcpy(card->driver, "ATIIXP-MODEM");
	strcpy(card->shortname, "ATI IXP Modem");
	if ((err = snd_atiixp_create(card, pci, &chip)) < 0)
		goto __error;
	card->private_data = chip;

	if ((err = snd_atiixp_aclink_reset(chip)) < 0)
		goto __error;

	if ((err = snd_atiixp_mixer_new(chip, ac97_clock)) < 0)
		goto __error;

	if ((err = snd_atiixp_pcm_new(chip)) < 0)
		goto __error;
	
	snd_atiixp_proc_init(chip);

	snd_atiixp_chip_start(chip);

	sprintf(card->longname, "%s rev %x at 0x%lx, irq %i",
		card->shortname, pci->revision, chip->addr, chip->irq);

	if ((err = snd_card_register(card)) < 0)
		goto __error;

	pci_set_drvdata(pci, card);
	return 0;

 __error:
	snd_card_free(card);
	return err;
}

static void __devexit snd_atiixp_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}
Example #22
static int ohci_pci_resume (struct usb_hcd *hcd)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	int			temp;
	int			retval = 0;
	unsigned long		flags;

#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Re-enable USB PAD & cell clock */
		of_node = pci_device_to_OF_node (hcd->pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_USB_ENABLE, of_node, 0, 1);
	}
#endif
	/* did we suspend, or were we powered off? */
	ohci->hc_control = readl (&ohci->regs->control);
	temp = ohci->hc_control & OHCI_CTRL_HCFS;

#ifdef DEBUG
	/* the registers may look crazy here */
	ohci_dump_status (ohci, 0, 0);
#endif

	/* Re-enable bus mastering */
	pci_set_master (ohci->hcd.pdev);
	
	switch (temp) {

	case OHCI_USB_RESET:	// lost power
		ohci_info (ohci, "USB restart\n");
		retval = hc_restart (ohci);
		break;

	case OHCI_USB_SUSPEND:	// host wakeup
	case OHCI_USB_RESUME:	// remote wakeup
		ohci_info (ohci, "USB continue from %s wakeup\n",
			 (temp == OHCI_USB_SUSPEND)
				? "host" : "remote");
		ohci->hc_control = OHCI_USB_RESUME;
		writel (ohci->hc_control, &ohci->regs->control);
		(void) readl (&ohci->regs->control);
		mdelay (20); /* no schedule here ! */
		/* Some controllers (lucent) need a longer delay here */
		mdelay (15);

		ohci->hc_control = readl (&ohci->regs->control);
		temp = ohci->hc_control & OHCI_CTRL_HCFS;
		if (temp != OHCI_USB_RESUME) {
			ohci_err (ohci, "controller won't resume\n");
			ohci->disabled = 1;
			retval = -EIO;
			break;
		}

		/* Some chips like being resumed first */
		writel (OHCI_USB_OPER, &ohci->regs->control);
		(void) readl (&ohci->regs->control);
		mdelay (3);

		/* Then re-enable operations */
		spin_lock_irqsave (&ohci->lock, flags);
		ohci->disabled = 0;
		ohci->sleeping = 0;
		ohci->hc_control = OHCI_CONTROL_INIT | OHCI_USB_OPER;
		if (!ohci->ed_rm_list) {
			if (ohci->ed_controltail)
				ohci->hc_control |= OHCI_CTRL_CLE;
			if (ohci->ed_bulktail)
				ohci->hc_control |= OHCI_CTRL_BLE;
		}
		hcd->state = USB_STATE_READY;
		writel (ohci->hc_control, &ohci->regs->control);

		/* trigger a start-frame interrupt (why?) */
		writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
		writel (OHCI_INTR_SF, &ohci->regs->intrenable);

		/* Check for a pending done list */
		writel (OHCI_INTR_WDH, &ohci->regs->intrdisable);	
		(void) readl (&ohci->regs->intrdisable);
		spin_unlock_irqrestore (&ohci->lock, flags);

#ifdef CONFIG_PMAC_PBOOK
		if (_machine == _MACH_Pmac)
			enable_irq (hcd->pdev->irq);
#endif
		if (ohci->hcca->done_head)
			dl_done_list (ohci, dl_reverse_done_list (ohci), NULL);
		writel (OHCI_INTR_WDH, &ohci->regs->intrenable); 

		/* assume there are TDs on the bulk and control lists */
		writel (OHCI_BLF | OHCI_CLF, &ohci->regs->cmdstatus);

		/* ohci_dump_status (ohci); */
		ohci_dbg (ohci, "sleeping = %d, disabled = %d\n",
			ohci->sleeping, ohci->disabled);
		break;

	default:
		ohci_warn (ohci, "odd PCI resume\n");
	}
	return retval;
}
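The (void) readl (&ohci->regs->control) calls sprinkled through this resume path are not dead code: PCI writes are posted, and reading any register on the same device forces the preceding write to complete before the mdelay() starts timing. The idiom in isolation, as a sketch:

	/* Sketch: flush a posted MMIO write before a timed delay. */
	writel (ohci->hc_control, &ohci->regs->control);
	(void) readl (&ohci->regs->control);	/* write reaches the chip here */
	mdelay (20);				/* now the delay means something */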
Example #23
int asihpi_adapter_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *pci_id)
{
	int idx, nm;
	int adapter_index;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));
	memset(&pci, 0, sizeof(pci));	/* err path tests ap_mem_base[] for NULL */

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}

	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			&pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			pci.ap_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!pci.ap_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}
	}

	pci.pci_dev = pci_dev;
	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	adapter_index = hr.u.s.adapter_index;
	adapter.adapter = hpi_find_adapter(adapter_index);

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (hr.error)
		goto err;

	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[adapter_index] = adapter;
	mutex_init(&adapters[adapter_index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter_index]);

	dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
		 adapter.adapter->type, adapter_index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pci.ap_mem_base[idx]) {
			iounmap(pci.ap_mem_base[idx]);
			pci.ap_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
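The err: unwind relies on every unused ap_mem_base slot being NULL, which is why zeroing pci (added above) matters. With managed mappings the manual unmap loop goes away entirely; a sketch assuming pcim_enable_device() had been used instead of pci_enable_device():

	/* Sketch: managed ioremap of all MEM BARs; unmapped automatically. */
	for (idx = 0; idx < nm; idx++) {
		if (!(pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM))
			continue;
		pci.ap_mem_base[idx] = pcim_iomap(pci_dev, idx, 0);
		if (!pci.ap_mem_base[idx])
			return -ENOMEM;
	}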
Example #24
/*
 *    probe function - creates the card manager
 */
static int __devinit snd_mixart_probe(struct pci_dev *pci,
				      const struct pci_device_id *pci_id)
{
	static int dev;
	struct mixart_mgr *mgr;
	unsigned int i;
	int err;
	size_t size;

	/*
	 */
	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (! enable[dev]) {
		dev++;
		return -ENOENT;
	}

	/* enable PCI device */
	if ((err = pci_enable_device(pci)) < 0)
		return err;
	pci_set_master(pci);

	/* check if we can restrict PCI DMA transfers to 32 bits */
	if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
		snd_printk(KERN_ERR "architecture does not support 32bit PCI busmaster DMA\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	/*
	 */
	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (! mgr) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	mgr->pci = pci;
	mgr->irq = -1;

	/* resource assignment */
	if ((err = pci_request_regions(pci, CARD_NAME)) < 0) {
		kfree(mgr);
		pci_disable_device(pci);
		return err;
	}
	for (i = 0; i < 2; i++) {
		mgr->mem[i].phys = pci_resource_start(pci, i);
		mgr->mem[i].virt = pci_ioremap_bar(pci, i);
		if (!mgr->mem[i].virt) {
			printk(KERN_ERR "unable to remap resource 0x%lx\n",
			       mgr->mem[i].phys);
			snd_mixart_free(mgr);
			return -EBUSY;
		}
	}

	if (request_irq(pci->irq, snd_mixart_interrupt, IRQF_SHARED,
			KBUILD_MODNAME, mgr)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_mixart_free(mgr);
		return -EBUSY;
	}
	mgr->irq = pci->irq;

	sprintf(mgr->shortname, "Digigram miXart");
	sprintf(mgr->longname, "%s at 0x%lx & 0x%lx, irq %i", mgr->shortname, mgr->mem[0].phys, mgr->mem[1].phys, mgr->irq);

	/* ISR spinlock  */
	spin_lock_init(&mgr->lock);

	/* init mailbox  */
	mgr->msg_fifo_readptr = 0;
	mgr->msg_fifo_writeptr = 0;

	spin_lock_init(&mgr->msg_lock);
	mutex_init(&mgr->msg_mutex);
	init_waitqueue_head(&mgr->msg_sleep);
	atomic_set(&mgr->msg_processed, 0);

	/* init setup mutex*/
	mutex_init(&mgr->setup_mutex);

	/* init message tasklet */
	tasklet_init(&mgr->msg_taskq, snd_mixart_msg_tasklet, (unsigned long) mgr);

	/* card assignment */
	mgr->num_cards = MIXART_MAX_CARDS; /* 4  FIXME: configurable? */
	for (i = 0; i < mgr->num_cards; i++) {
		struct snd_card *card;
		char tmpid[16];
		int idx;

		if (index[dev] < 0)
			idx = index[dev];
		else
			idx = index[dev] + i;
		snprintf(tmpid, sizeof(tmpid), "%s-%d", id[dev] ? id[dev] : "MIXART", i);
		err = snd_card_create(idx, tmpid, THIS_MODULE, 0, &card);

		if (err < 0) {
			snd_printk(KERN_ERR "cannot allocate the card %d\n", i);
			snd_mixart_free(mgr);
			return err;
		}

		strcpy(card->driver, CARD_NAME);
		sprintf(card->shortname, "%s [PCM #%d]", mgr->shortname, i);
		sprintf(card->longname, "%s [PCM #%d]", mgr->longname, i);

		if ((err = snd_mixart_create(mgr, card, i)) < 0) {
			snd_card_free(card);
			snd_mixart_free(mgr);
			return err;
		}

		if (i == 0) {
			/* init proc interface only for chip0 */
			snd_mixart_proc_init(mgr->chip[i]);
		}

		if ((err = snd_card_register(card)) < 0) {
			snd_mixart_free(mgr);
			return err;
		}
	}

	/* init firmware status (mgr->dsp_loaded reset in hwdep_new) */
	mgr->board_type = MIXART_DAUGHTER_TYPE_NONE;

	/* create array of streaminfo */
	size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
			    sizeof(struct mixart_flowinfo)) );
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
				size, &mgr->flowinfo) < 0) {
		snd_mixart_free(mgr);
		return -ENOMEM;
	}
	/* init streaminfo_array */
	memset(mgr->flowinfo.area, 0, size);

	/* create array of bufferinfo */
	size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
			    sizeof(struct mixart_bufferinfo)) );
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
				size, &mgr->bufferinfo) < 0) {
		snd_mixart_free(mgr);
		return -ENOMEM;
	}
	/* init bufferinfo_array */
	memset(mgr->bufferinfo.area, 0, size);

	/* set up firmware */
	err = snd_mixart_setup_firmware(mgr);
	if (err < 0) {
		snd_mixart_free(mgr);
		return err;
	}

	pci_set_drvdata(pci, mgr);
	dev++;
	return 0;
}
Example #25
static int __init chd_dec_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *entry)
{
	struct device *dev = &pdev->dev;
	struct crystalhd_adp *pinfo;
	int rc;
	BC_STATUS sts = BC_STS_SUCCESS;

	dev_info(dev, "Starting Device:0x%04x\n", pdev->device);

	pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_KERNEL);
	if (!pinfo) {
		dev_err(dev, "%s: Failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	pinfo->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(dev, "%s: Failed to enable PCI device\n", __func__);
		goto free_priv;
	}

	snprintf(pinfo->name, sizeof(pinfo->name), "crystalhd_pci_e:%d:%d:%d",
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));

	rc = chd_pci_reserve_mem(pinfo);
	if (rc) {
		dev_err(dev, "%s: Failed to set up memory regions.\n",
			__func__);
		goto disable_device;
	}

	pinfo->present	= 1;
	pinfo->drv_data = entry->driver_data;

	/* Setup adapter level lock.. */
	spin_lock_init(&pinfo->lock);

	/* setup api stuff.. */
	rc = chd_dec_init_chdev(pinfo);
	if (rc)
		goto release_mem;

	rc = chd_dec_enable_int(pinfo);
	if (rc) {
		dev_err(dev, "%s: _enable_int err:%d\n", __func__, rc);
		goto cleanup_chdev;
	}

	/* Set dma mask... */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		pinfo->dmabits = 64;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		pinfo->dmabits = 32;
	} else {
		dev_err(dev, "%s: Unabled to setup DMA %d\n", __func__, rc);
		rc = -ENODEV;
		goto cleanup_int;
	}

	sts = crystalhd_setup_cmd_context(&pinfo->cmds, pinfo);
	if (sts != BC_STS_SUCCESS) {
		dev_err(dev, "%s: cmd setup :%d\n", __func__, sts);
		rc = -ENODEV;
		goto cleanup_int;
	}

	pci_set_master(pdev);

	pci_set_drvdata(pdev, pinfo);

	g_adp_info = pinfo;

out:
	return rc;
cleanup_int:
	chd_dec_disable_int(pinfo);
cleanup_chdev:
	chd_dec_release_chdev(pinfo);
release_mem:
	chd_pci_release_mem(pinfo);
disable_device:
	pci_disable_device(pdev);
free_priv:
	kfree(pinfo);
	goto out;
}
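The 64-then-32-bit mask fallback above predates dma_set_mask_and_coherent(); the same logic today is two calls, still recording which width stuck. A sketch:

	/* Sketch: modern DMA-width fallback for the probe above. */
	pinfo->dmabits = 64;
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pinfo->dmabits = 32;
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto cleanup_int;
	}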
Example #26
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev,&pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	if (b44_debug >= 0)
		bp->msg_enable = (1 << b44_debug) - 1;
	else
		bp->msg_enable = B44_DEF_MSG_ENABLE;

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (!bp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev, bp->pci_cfg_state);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
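The byte-by-byte MAC print loop above predates the %pM printk extension (compare Example #19, which already uses it); on later kernels the whole loop collapses to one line. A sketch:

	/* Sketch: %pM prints a 6-byte MAC address with colons. */
	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);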
Example #27
int __init
setup_netjet_s(struct IsdnCard *card)
{
	int bytecnt;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];
	long flags;

#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif
	strcpy(tmp, NETjet_S_revision);
	printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_NETJET_S)
		return(0);
	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);

#ifdef CONFIG_PCI

	for ( ;; )
	{
		if (!pci_present()) {
			printk(KERN_ERR "Netjet: no PCI bus present\n");
			return(0);
		}
		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
			if (pci_enable_device(dev_netjet))
				return(0);
			pci_set_master(dev_netjet);
			cs->irq = dev_netjet->irq;
			if (!cs->irq) {
				printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
				return(0);
			}
			cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
			if (!cs->hw.njet.base) {
				printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
				return(0);
			}
			/* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
			if ((dev_netjet->subsystem_vendor == 0x55) &&
				(dev_netjet->subsystem_device == 0x02)) {
				printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
				printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
				return(0);
			}
			/* end new code */
		} else {
			printk(KERN_WARNING "NETjet-S: No PCI card found\n");
			return(0);
		}

		cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
		cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;

		save_flags(flags);
		sti();

		cs->hw.njet.ctrl_reg = 0xff;  /* Reset On */
		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout((10*HZ)/1000);	/* Timeout 10ms */

		cs->hw.njet.ctrl_reg = 0x00;  /* Reset Off and status read clear */
		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout((10*HZ)/1000);	/* Timeout 10ms */

		restore_flags(flags);

		cs->hw.njet.auxd = 0xC0;
		cs->hw.njet.dmactrl = 0;

		byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
		byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
		byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);

		switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
		{
			case 0 :
				break;

			case 3 :
				printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
				continue;

			default :
				printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
				return 0;
		}
		break;
	}
#else

	printk(KERN_WARNING "NETjet-S: NO_PCI_BIOS\n");
	printk(KERN_WARNING "NETjet-S: unable to config NETJET-S PCI\n");
	return (0);

#endif /* CONFIG_PCI */

	bytecnt = 256;

	printk(KERN_INFO
		"NETjet-S: PCI card configured at %#lx IRQ %d\n",
		cs->hw.njet.base, cs->irq);
	if (check_region(cs->hw.njet.base, bytecnt)) {
		printk(KERN_WARNING
		       "HiSax: %s config port %#lx-%#lx already in use\n",
		       CardType[card->typ],
		       cs->hw.njet.base,
		       cs->hw.njet.base + bytecnt);
		return (0);
	} else {
		request_region(cs->hw.njet.base, bytecnt, "netjet-s isdn");
	}
	reset_netjet_s(cs);
	cs->readisac  = &NETjet_ReadIC;
	cs->writeisac = &NETjet_WriteIC;
	cs->readisacfifo  = &NETjet_ReadICfifo;
	cs->writeisacfifo = &NETjet_WriteICfifo;
	cs->BC_Read_Reg  = &dummyrr;
	cs->BC_Write_Reg = &dummywr;
	cs->BC_Send_Data = &netjet_fill_dma;
	cs->cardmsg = &NETjet_S_card_msg;
	cs->irq_func = &netjet_s_interrupt;
	cs->irq_flags |= SA_SHIRQ;
	ISACVersion(cs, "NETjet-S:");
	return (1);
}
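Each reset delay above hand-rolls what msleep() now does (set TASK_UNINTERRUPTIBLE, then schedule_timeout()). On anything modern the reset pulse reads as the sketch below:

	/* Sketch: msleep() replaces the set_current_state()/schedule_timeout()
	 * pairs used twice above. */
	cs->hw.njet.ctrl_reg = 0xff;	/* reset on */
	byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
	msleep(10);
	cs->hw.njet.ctrl_reg = 0x00;	/* reset off, clear status */
	byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
	msleep(10);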
Example #28
static int dt3155_init_board(struct dt3155_priv *pd)
{
	struct pci_dev *pdev = pd->pdev;
	int i;
	u8 tmp = 0;

	pci_set_master(pdev); /* dt3155 needs it */

	/*  resetting the adapter  */
	iowrite32(ADDR_ERR_ODD | ADDR_ERR_EVEN | FLD_CRPT_ODD | FLD_CRPT_EVEN |
			FLD_DN_ODD | FLD_DN_EVEN, pd->regs + CSR1);
	mmiowb();
	msleep(20);

	/*  initializing adapter registers  */
	iowrite32(FIFO_EN | SRST, pd->regs + CSR1);
	mmiowb();
	iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT);
	iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT);
	iowrite32(0x00000020, pd->regs + FIFO_TRIGER);
	iowrite32(0x00000103, pd->regs + XFER_MODE);
	iowrite32(0, pd->regs + RETRY_WAIT_CNT);
	iowrite32(0, pd->regs + INT_CSR);
	iowrite32(1, pd->regs + EVEN_FLD_MASK);
	iowrite32(1, pd->regs + ODD_FLD_MASK);
	iowrite32(0, pd->regs + MASK_LENGTH);
	iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT);
	iowrite32(0x01010101, pd->regs + IIC_CLK_DUR);
	mmiowb();

	/* verifying that we have a DT3155 board (not just a SAA7116 chip) */
	read_i2c_reg(pd->regs, DT_ID, &tmp);
	if (tmp != DT3155_ID)
		return -ENODEV;

	/* initialize AD LUT */
	write_i2c_reg(pd->regs, AD_ADDR, 0);
	for (i = 0; i < 256; i++)
		write_i2c_reg(pd->regs, AD_LUT, i);

	/* initialize ADC references */
	/* FIXME: pos_ref & neg_ref depend on VT_50HZ */
	write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
	write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
	write_i2c_reg(pd->regs, AD_ADDR, AD_POS_REF);
	write_i2c_reg(pd->regs, AD_CMD, 34);
	write_i2c_reg(pd->regs, AD_ADDR, AD_NEG_REF);
	write_i2c_reg(pd->regs, AD_CMD, 0);

	/* initialize PM LUT */
	write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM);
	for (i = 0; i < 256; i++) {
		write_i2c_reg(pd->regs, PM_LUT_ADDR, i);
		write_i2c_reg(pd->regs, PM_LUT_DATA, i);
	}
	write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM | PM_LUT_SEL);
	for (i = 0; i < 256; i++) {
		write_i2c_reg(pd->regs, PM_LUT_ADDR, i);
		write_i2c_reg(pd->regs, PM_LUT_DATA, i);
	}
	write_i2c_reg(pd->regs, CONFIG, pd->config); /*  ACQ_MODE_EVEN  */

	/* select channel 1 for input and set sync level */
	write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
	write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);

	/* disable all irqs, clear all irq flags */
	iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD,
			pd->regs + INT_CSR);

	return 0;
}
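The two PM LUT passes above program identity maps (entry i holds i). A helper makes that intent explicit and removes the duplicated loop; a sketch with a name of ours:

/* Sketch: program a 256-entry identity LUT (helper name ours). */
static void dt3155_load_identity_lut(struct dt3155_priv *pd, u8 addr_reg,
				     u8 data_reg)
{
	int i;

	for (i = 0; i < 256; i++) {
		write_i2c_reg(pd->regs, addr_reg, i);
		write_i2c_reg(pd->regs, data_reg, i);
	}
}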
Example #29
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;


	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For non-wakeable HW the runtime pm framework
	 * can't be used at the pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
Example #30
static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
{
	unsigned long bar5	= pci_resource_start(dev, 5);
	unsigned long barsize	= pci_resource_len(dev, 5);
	u8 tmpbyte	= 0;
	void __iomem *ioaddr;

	/*
	 *	Drop back to PIO if we can't map the mmio. Some
	 *	systems seem to get terminally confused in the PCI
	 *	spaces.
	 */
	 
	if(!request_mem_region(bar5, barsize, name))
	{
		printk(KERN_WARNING "siimage: IDE controller MMIO ports not available.\n");
		return 0;
	}
		
	ioaddr = ioremap(bar5, barsize);

	if (ioaddr == NULL)
	{
		release_mem_region(bar5, barsize);
		return 0;
	}

	pci_set_master(dev);
	pci_set_drvdata(dev, (void *) ioaddr);

	if (pdev_is_sata(dev)) {
		writel(0, ioaddr + 0x148);
		writel(0, ioaddr + 0x1C8);
	}

	writeb(0, ioaddr + 0xB4);
	writeb(0, ioaddr + 0xF4);
	tmpbyte = readb(ioaddr + 0x4A);

	switch (tmpbyte & 0x30) {
		case 0x00:
			/* In 100 MHz clocking, try and switch to 133 MHz */
			writeb(tmpbyte|0x10, ioaddr + 0x4A);
			break;
		case 0x10:
			/* On 133 MHz clocking */
			break;
		case 0x20:
			/* On PCIx2 clocking */
			break;
		case 0x30:
			/* Clocking is disabled; try to force the 133 MHz
			   clock back on */
			writeb(tmpbyte & ~0x20, ioaddr + 0x4A);
			break;
	}
	
	writeb(      0x72, ioaddr + 0xA1);
	writew(    0x328A, ioaddr + 0xA2);
	writel(0x62DD62DD, ioaddr + 0xA4);
	writel(0x43924392, ioaddr + 0xA8);
	writel(0x40094009, ioaddr + 0xAC);
	writeb(      0x72, ioaddr + 0xE1);
	writew(    0x328A, ioaddr + 0xE2);
	writel(0x62DD62DD, ioaddr + 0xE4);
	writel(0x43924392, ioaddr + 0xE8);
	writel(0x40094009, ioaddr + 0xEC);

	if (pdev_is_sata(dev)) {
		writel(0xFFFF0000, ioaddr + 0x108);
		writel(0xFFFF0000, ioaddr + 0x188);
		writel(0x00680000, ioaddr + 0x148);
		writel(0x00680000, ioaddr + 0x1C8);
	}

	tmpbyte = readb(ioaddr + 0x4A);

	proc_reports_siimage(dev, (tmpbyte>>4), name);
	return 1;
}
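Bits 4-5 of the BA5 register at 0x4A select the controller's clocking, which is why the function re-reads the byte after the switch and hands it to proc_reports_siimage(). A small decoder makes the encoding visible (sketch; helper name ours, cases taken from the switch above):

/* Sketch: decode the SiI clocking field at BA5 offset 0x4A. */
static const char *sii_clock_mode(u8 reg4a)
{
	switch (reg4a & 0x30) {
	case 0x00: return "100 MHz";
	case 0x10: return "133 MHz";
	case 0x20: return "PCI x2";
	default:   return "disabled";
	}
}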