Example #1
/**
 * This function is called by the PCI core when it has a struct pci_dev that it 
 * thinks the driver wants to control. It will allocate the memory for the struct
 * alt_up_pci_dev, initialize it correctly and dynamically allocate a character
 * device node.
 *
 * @param[in] dev The pointer to the pci device that invokes the probe function.
 * @param[in] id  The pci_id_table of the driver.
 * 
 * @return Return 0 on success.
 */
static int __devinit alt_up_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) {

	int i, retval = 0;

	// allocate the memory for the struct alt_up_pci_dev
	struct alt_up_pci_dev *mydev = kmalloc( sizeof(struct alt_up_pci_dev), GFP_KERNEL );
	if (mydev == NULL){
		printk(KERN_DEBUG "kmalloc() memory for struct alt_up_pci_dev failed. \n");
		goto err_alloc_dev;
	}
	
	// save the pointers for the future usage
	pci_set_drvdata(dev, (void *)mydev);
	mydev->pci_dev = dev;
	
	// wake up the device             
	retval = pci_enable_device(dev);
	if (retval) {
		printk(KERN_DEBUG "pci_enable_device() failed. \n");
		goto err_enable_device;
	}

	// enables bus-mastering for device dev       
	pci_set_master(dev);
	
	// reserve PCI I/O and memory resources
	retval = pci_request_regions(dev, DRV_NAME);
	if (retval) {
		printk(KERN_DEBUG "pci_request_regions() failed. \n");
		goto err_request_regions;
	}
			
	// set the DMA addressing limitation
	retval = pci_set_dma_mask(dev, DMA_BIT_MASK( pci_dma_bit_range ));
	if (retval) {
		printk(KERN_DEBUG "pci_set_dma_mask() failed. \n");
		goto err_set_dma_mask;      
	}

	retval = pci_set_consistent_dma_mask(dev,DMA_BIT_MASK( pci_dma_bit_range ));
	if(retval) {
		printk(KERN_DEBUG "pci_set_consistent_dma_mask() failed. \n");
		goto err_set_dma_mask;
	}

	// set __iomem address, accessed by ioread, iowrite
	for (i = 0; i < MAX_NUM_OF_BARS; i ++) {
		if ( pci_resource_end(dev, i) != pci_resource_start(dev, i) ){

			/* create a virtual mapping cookie for a PCI BAR, 
			 * second arg is BAR, third is maxlen (0 means complete BAR) */
			mydev->bar[i] = pci_iomap(dev, i, 0); 
			if( !mydev->bar[i] ){
				printk(KERN_DEBUG "pci_iomap() failed. \n");
				goto err_iomap;
			}
			
			printk(KERN_DEBUG DRV_NAME " BAR%d initialized.\n", i);
			mydev->bar_size[i] = pci_resource_end(dev, i) - pci_resource_start(dev, i) + 1;
			
		} else  mydev->bar[i] = NULL;
	}

	// initialize the alt_up_pci_dev struct
	retval = alt_up_pci_dev_init(mydev);
	if(retval) {
		printk(KERN_DEBUG "alt_up_pci_dev_init() failed. \n");
		goto err_dev_init;
	}
	
	// have MSI enabled on its device function    
	retval = pci_enable_msi(dev);
	if (retval) {
		printk(KERN_DEBUG "pci_enable_msi() failed. \n");
		goto err_enable_msi;        
	}
			
	// request irq line for interrupt
	mydev->irq_line = dev->irq;
	retval = request_irq((int)mydev->irq_line, (void*)alt_up_pci_irqhandler, IRQF_SHARED, DRV_NAME, (void *)mydev);
	if (retval) {
		printk(KERN_DEBUG "pci_request_irq() failed. \n");
		goto err_request_irq;
	}

	// write irq_line to the PCI configuration space
	retval = pci_write_config_byte(dev, PCI_INTERRUPT_LINE, mydev->irq_line);
	if (retval) {
		printk(KERN_DEBUG "pci_read_config() failed. \n");
		goto err_write_config;       
	}  

	/* dynamically allocate a character device node
	 * 0 : requested minor
	 * 1 : count 
	 */
	retval = alloc_chrdev_region(&mydev->cdev_no, 0, 1, DRV_NAME);
	if(retval) {
		printk(KERN_DEBUG "alloc_chrdev_region() failed. \n");
		goto err_alloc_chrdev;
	}

	// init the cdev
	cdev_init(&mydev->cdev, &alt_up_pci_fops);
	mydev->cdev.owner = THIS_MODULE;
	mydev->cdev.ops = &alt_up_pci_fops;
	
	// add the cdev to kernel, from now on, the driver is alive
	retval = cdev_add(&mydev->cdev, mydev->cdev_no, 1);   /* 1: count */
	if(retval) {
		printk(KERN_DEBUG "cdev_add() failed. \n");
		goto err_cdev_add;
	}
	
	return 0;
	
	
	//cdev_del(&mydev->cdev);
err_cdev_add:
	unregister_chrdev_region(mydev->cdev_no, 1);  
err_alloc_chrdev:

err_write_config:
	free_irq(mydev->irq_line, (void *)mydev);
err_request_irq:
	pci_disable_msi(dev);
err_enable_msi:
	alt_up_pci_dev_exit(mydev);
err_dev_init:
	for (i = 0; i < MAX_NUM_OF_BARS; i ++) {
		if( mydev->bar[i] != NULL )
			pci_iounmap(dev, mydev->bar[i]);
	}
	goto err_set_dma_mask;
err_iomap:     
	for ( i = i - 1; i >= 0; i --){
		if( mydev->bar[i] != NULL)
			pci_iounmap(dev, mydev->bar[i]);
	}
err_set_dma_mask:
	pci_release_regions(dev);
err_request_regions:
	pci_disable_device(dev);
err_enable_device:
	kfree(mydev);
err_alloc_dev:
	printk("alt_up_pci_probe() failed with error: %d \n ", retval);

	return retval;        
}
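For contrast with the unwinding ladder above, the following is a minimal sketch of a matching remove callback that releases the same resources in reverse order. It is not taken from the original driver: the name alt_up_pci_remove and the assumption that alt_up_pci_dev_exit() fully undoes alt_up_pci_dev_init() are illustrative only.

static void __devexit alt_up_pci_remove(struct pci_dev *dev)
{
	int i;
	struct alt_up_pci_dev *mydev = pci_get_drvdata(dev);

	/* tear down in the reverse order of alt_up_pci_probe() */
	cdev_del(&mydev->cdev);
	unregister_chrdev_region(mydev->cdev_no, 1);

	free_irq(mydev->irq_line, (void *)mydev);
	pci_disable_msi(dev);

	alt_up_pci_dev_exit(mydev);

	for (i = 0; i < MAX_NUM_OF_BARS; i++) {
		if (mydev->bar[i] != NULL)
			pci_iounmap(dev, mydev->bar[i]);
	}

	pci_release_regions(dev);
	pci_disable_device(dev);

	pci_set_drvdata(dev, NULL);
	kfree(mydev);
}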
Example #2
int rtbt_hps_iface_init(
	IN int if_type,
	IN void *if_dev,
	IN struct rtbt_os_ctrl *os_ctrl)
{
	struct hci_dev *hdev;

	printk("--->%s(): if_type=%d\n", __FUNCTION__, if_type);

	/* Initialize HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		printk("Can't allocate HCI device\n");
		return -1;
	}

	switch (if_type) {
		case RAL_INF_PCI:
			{
				struct pci_dev *pcidev = (struct pci_dev *)if_dev;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
				hdev->bus = HCI_PCI;
				hdev->dev_type = HCI_BREDR;
#else
				hdev->type = HCI_PCI;
#endif
				pci_set_drvdata(pcidev, hdev);
				SET_HCIDEV_DEV(hdev, &pcidev->dev);
			}
			break;

		default:
			printk("invalid if_type(%d)!\n", if_type);
			hci_free_dev(hdev);
			return -1;
	}
	g_hdev = hdev;
	os_ctrl->bt_dev = hdev;
	os_ctrl->if_dev = if_dev;
	os_ctrl->hps_ops->recv = rtbt_hci_dev_receive;

	hci_set_drvdata(hdev, os_ctrl);
	hdev->open = rtbt_hci_dev_open;
	hdev->close = rtbt_hci_dev_close;
	hdev->flush = rtbt_hci_dev_flush;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,7)
	hdev->send = rtbt_hci_dev_send_legacy;
#else
	hdev->send = rtbt_hci_dev_send;
#endif
//	hdev->destruct = rtbt_hci_dev_destruct;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,7)
	hdev->ioctl = rtbt_hci_dev_ioctl;
#endif

//	hdev->owner = THIS_MODULE;

	printk("<--%s():alloc hdev(0x%lx) done\n", __FUNCTION__, (ULONG)hdev);

	return 0;
}
static int __devinit rtl8180_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct ieee80211_hw *dev;
	struct rtl8180_priv *priv;
	unsigned long mem_addr, mem_len;
	unsigned int io_addr, io_len;
	int err, i;
	struct eeprom_93cx6 eeprom;
	const char *chip_name, *rf_name = NULL;
	u32 reg;
	u16 eeprom_val;
	DECLARE_MAC_BUF(mac);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s (rtl8180): Cannot enable new PCI device\n",
		       pci_name(pdev));
		return err;
	}

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n",
		       pci_name(pdev));
		return err;
	}

	io_addr = pci_resource_start(pdev, 0);
	io_len = pci_resource_len(pdev, 0);
	mem_addr = pci_resource_start(pdev, 1);
	mem_len = pci_resource_len(pdev, 1);

	if (mem_len < sizeof(struct rtl818x_csr) ||
	    io_len < sizeof(struct rtl818x_csr)) {
		printk(KERN_ERR "%s (rtl8180): Too short PCI resources\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_free_reg;
	}

	if ((err = pci_set_dma_mask(pdev, 0xFFFFFF00ULL)) ||
	    (err = pci_set_consistent_dma_mask(pdev, 0xFFFFFF00ULL))) {
		printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n",
		       pci_name(pdev));
		goto err_free_reg;
	}

	pci_set_master(pdev);

	dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8180_ops);
	if (!dev) {
		printk(KERN_ERR "%s (rtl8180): ieee80211 alloc failed\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = pci_iomap(pdev, 1, mem_len);
	if (!priv->map)
		priv->map = pci_iomap(pdev, 0, io_len);

	if (!priv->map) {
		printk(KERN_ERR "%s (rtl8180): Cannot map device memory\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_free_dev;
	}

	BUILD_BUG_ON(sizeof(priv->channels) != sizeof(rtl818x_channels));
	BUILD_BUG_ON(sizeof(priv->rates) != sizeof(rtl818x_rates));

	memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
	memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));

	priv->band.band = IEEE80211_BAND_2GHZ;
	priv->band.channels = priv->channels;
	priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
	priv->band.bitrates = priv->rates;
	priv->band.n_bitrates = 4;
	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;

	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		     IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_SIGNAL_UNSPEC;
	dev->queues = 1;
	dev->max_signal = 65;

	reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
	reg &= RTL818X_TX_CONF_HWVER_MASK;
	switch (reg) {
	case RTL818X_TX_CONF_R8180_ABCD:
		chip_name = "RTL8180";
		break;
	case RTL818X_TX_CONF_R8180_F:
		chip_name = "RTL8180vF";
		break;
	case RTL818X_TX_CONF_R8185_ABC:
		chip_name = "RTL8185";
		break;
	case RTL818X_TX_CONF_R8185_D:
		chip_name = "RTL8185vD";
		break;
	default:
		printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n",
		       pci_name(pdev), reg >> 25);
		err = -ENODEV;
		goto err_iounmap;
	}

	priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC;
	if (priv->r8185) {
		priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
		pci_try_set_mwi(pdev);
	}

	eeprom.data = dev;
	eeprom.register_read = rtl8180_eeprom_register_read;
	eeprom.register_write = rtl8180_eeprom_register_write;
	if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
		eeprom.width = PCI_EEPROM_WIDTH_93C66;
	else
		eeprom.width = PCI_EEPROM_WIDTH_93C46;

	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_PROGRAM);
	rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
	udelay(10);

	eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
	eeprom_val &= 0xFF;
	switch (eeprom_val) {
	case 1:	rf_name = "Intersil";
		break;
	case 2:	rf_name = "RFMD";
		break;
	case 3:	priv->rf = &sa2400_rf_ops;
		break;
	case 4:	priv->rf = &max2820_rf_ops;
		break;
	case 5:	priv->rf = &grf5101_rf_ops;
		break;
	case 9:	priv->rf = rtl8180_detect_rf(dev);
		break;
	case 10:
		rf_name = "RTL8255";
		break;
	default:
		printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n",
		       pci_name(pdev), eeprom_val);
		err = -ENODEV;
		goto err_iounmap;
	}

	if (!priv->rf) {
		printk(KERN_ERR "%s (rtl8180): %s RF frontend not supported!\n",
		       pci_name(pdev), rf_name);
		err = -ENODEV;
		goto err_iounmap;
	}

	eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
	priv->csthreshold = eeprom_val >> 8;
	if (!priv->r8185) {
		__le32 anaparam;
		eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
		priv->anaparam = le32_to_cpu(anaparam);
		eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
	}

	eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)dev->wiphy->perm_addr, 3);
	if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
		printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
		       " randomly generated MAC addr\n", pci_name(pdev));
		random_ether_addr(dev->wiphy->perm_addr);
	}

	/* CCK TX power */
	for (i = 0; i < 14; i += 2) {
		u16 txpwr;
		eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr);
		priv->channels[i].hw_value = txpwr & 0xFF;
		priv->channels[i + 1].hw_value = txpwr >> 8;
	}

	/* OFDM TX power */
	if (priv->r8185) {
		for (i = 0; i < 14; i += 2) {
			u16 txpwr;
			eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
			priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
			priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
		}
	}

	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);

	spin_lock_init(&priv->lock);

	err = ieee80211_register_hw(dev);
	if (err) {
		printk(KERN_ERR "%s (rtl8180): Cannot register device\n",
		       pci_name(pdev));
		goto err_iounmap;
	}

	printk(KERN_INFO "%s: hwaddr %s, %s + %s\n",
	       wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr),
	       chip_name, priv->rf->name);

	return 0;

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(dev);

 err_free_reg:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	return err;
}
static void __devexit snd_cs5530_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}
static int __devinit isp1761_pci_probe(struct pci_dev *dev,
		const struct pci_device_id *id)
{
	u8 latency, limit;
	__u32 reg_data;
	int retry_count;
	struct usb_hcd *hcd;
	unsigned int devflags = 0;
	int ret_status = 0;

	resource_size_t pci_mem_phy0;
	resource_size_t memlength;

	u8 __iomem *chip_addr;
	u8 __iomem *iobase;
	resource_size_t nxp_pci_io_base;
	resource_size_t iolength;

	if (usb_disabled())
		return -ENODEV;

	if (pci_enable_device(dev) < 0)
		return -ENODEV;

	if (!dev->irq)
		return -ENODEV;

	/* Grab the PLX PCI memory-mapped port start address we need */
	nxp_pci_io_base = pci_resource_start(dev, 0);
	iolength = pci_resource_len(dev, 0);

	if (!request_mem_region(nxp_pci_io_base, iolength, "ISP1761 IO MEM")) {
		printk(KERN_ERR "request region #1\n");
		return -EBUSY;
	}

	iobase = ioremap_nocache(nxp_pci_io_base, iolength);
	if (!iobase) {
		printk(KERN_ERR "ioremap #1\n");
		ret_status = -ENOMEM;
		goto cleanup1;
	}
	/* Grab the PLX PCI shared memory of the ISP 1761 we need  */
	pci_mem_phy0 = pci_resource_start(dev, 3);
	memlength = pci_resource_len(dev, 3);
	if (memlength < 0xffff) {
		printk(KERN_ERR "memory length for this resource is wrong\n");
		ret_status = -ENOMEM;
		goto cleanup2;
	}

	if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
		printk(KERN_ERR "host controller already in use\n");
		ret_status = -EBUSY;
		goto cleanup2;
	}

	/* map available memory */
	chip_addr = ioremap_nocache(pci_mem_phy0,memlength);
	if (!chip_addr) {
		printk(KERN_ERR "Error ioremap failed\n");
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	/* bad pci latencies can contribute to overruns */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
	if (latency) {
		pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
		if (limit && limit < latency)
			pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
	}

	/* Try to check whether we can access Scratch Register of
	 * Host Controller or not. The initial PCI access is retried until
	 * local init for the PCI bridge is completed
	 */
	retry_count = 20;
	reg_data = 0;
	while ((reg_data != 0xFACE) && retry_count) {
		/* by default host is in 16bit mode, so
		 * io operations at this stage must be 16 bit
		 */
		writel(0xface, chip_addr + HC_SCRATCH_REG);
		udelay(100);
		reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
		retry_count--;
	}

	iounmap(chip_addr);

	/* Host Controller presence is detected by writing to scratch register
	 * and reading back and checking the contents are same or not
	 */
	if (reg_data != 0xFACE) {
		dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	pci_set_master(dev);

	/* configure PLX PCI chip to pass interrupts */
#define PLX_INT_CSR_REG 0x68
	reg_data = readl(iobase + PLX_INT_CSR_REG);
	reg_data |= 0x900;
	writel(reg_data, iobase + PLX_INT_CSR_REG);

	dev->dev.dma_mask = NULL;
	hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
		IRQF_SHARED, -ENOENT, &dev->dev, dev_name(&dev->dev),
		devflags);
	if (IS_ERR(hcd)) {
		ret_status = -ENODEV;
		goto cleanup3;
	}

	/* done with PLX IO access */
	iounmap(iobase);
	release_mem_region(nxp_pci_io_base, iolength);

	pci_set_drvdata(dev, hcd);
	return 0;

cleanup3:
	release_mem_region(pci_mem_phy0, memlength);
cleanup2:
	iounmap(iobase);
cleanup1:
	release_mem_region(nxp_pci_io_base, iolength);
	return ret_status;
}
Example #6
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
		return -ENODEV;

	vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
	if (ret) {
		kfree(vbox);
		return ret;
	}

	vbox->ddev.pdev = pdev;
	vbox->ddev.dev_private = vbox;
	pci_set_drvdata(pdev, vbox);
	mutex_init(&vbox->hw_mutex);

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_dev_put;

	ret = vbox_hw_init(vbox);
	if (ret)
		goto err_pci_disable;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto err_hw_fini;

	ret = vbox_mode_init(vbox);
	if (ret)
		goto err_mm_fini;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto err_mode_fini;

	ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
					&vbox_fb_helper_funcs, 32,
					vbox->num_crtcs);
	if (ret)
		goto err_irq_fini;

	ret = drm_dev_register(&vbox->ddev, 0);
	if (ret)
		goto err_fbdev_fini;

	return 0;

err_fbdev_fini:
	vbox_fbdev_fini(vbox);
err_irq_fini:
	vbox_irq_fini(vbox);
err_mode_fini:
	vbox_mode_fini(vbox);
err_mm_fini:
	vbox_mm_fini(vbox);
err_hw_fini:
	vbox_hw_fini(vbox);
err_pci_disable:
	pci_disable_device(pdev);
err_dev_put:
	drm_dev_put(&vbox->ddev);
	return ret;
}
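A matching teardown for this probe would undo the same steps in the opposite order; the sketch below is an illustration only (the driver's real vbox_pci_remove() is not part of this excerpt), assuming the same init/fini pairing used in the error ladder above.

static void vbox_pci_remove(struct pci_dev *pdev)
{
	struct vbox_private *vbox = pci_get_drvdata(pdev);

	/* unwind in the reverse order of vbox_pci_probe() */
	drm_dev_unregister(&vbox->ddev);
	vbox_fbdev_fini(vbox);
	vbox_irq_fini(vbox);
	vbox_mode_fini(vbox);
	vbox_mm_fini(vbox);
	vbox_hw_fini(vbox);
	pci_disable_device(pdev);
	drm_dev_put(&vbox->ddev);
}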
Example #7
static int __devinit chd_dec_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *entry)
{
	struct crystalhd_adp *pinfo;
	int rc;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x "
	       "s_vendor:0x%04x s_device: 0x%04x\n",
	       pdev->vendor, pdev->device, pdev->subsystem_vendor,
	       pdev->subsystem_device);

	pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_KERNEL);
	if (!pinfo) {
		BCMLOG_ERR("Failed to allocate memory\n");
		return -ENOMEM;
	}

	pinfo->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		BCMLOG_ERR("Failed to enable PCI device\n");
		goto err;
	}

	snprintf(pinfo->name, sizeof(pinfo->name), "crystalhd_pci_e:%d:%d:%d",
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));

	rc = chd_pci_reserve_mem(pinfo);
	if (rc) {
		BCMLOG_ERR("Failed to setup memory regions.\n");
		pci_disable_device(pdev);
		rc = -ENOMEM;
		goto err;
	}

	pinfo->present	= 1;
	pinfo->drv_data = entry->driver_data;

	/* Setup adapter level lock.. */
	spin_lock_init(&pinfo->lock);

	/* setup api stuff.. */
	chd_dec_init_chdev(pinfo);
	rc = chd_dec_enable_int(pinfo);
	if (rc) {
		BCMLOG_ERR("_enable_int err:%d\n", rc);
		pci_disable_device(pdev);
		rc = -ENODEV;
		goto err;
	}

	/* Set dma mask... */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		pinfo->dmabits = 64;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		pinfo->dmabits = 32;
	} else {
		BCMLOG_ERR("Unabled to setup DMA %d\n", rc);
		pci_disable_device(pdev);
		rc = -ENODEV;
		goto err;
	}

	sts = crystalhd_setup_cmd_context(&pinfo->cmds, pinfo);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("cmd setup :%d\n", sts);
		pci_disable_device(pdev);
		rc = -ENODEV;
		goto err;
	}

	pci_set_master(pdev);

	pci_set_drvdata(pdev, pinfo);

	g_adp_info = pinfo;

	return 0;

err:
	kfree(pinfo);
	return rc;
}
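The 64-bit-then-32-bit DMA mask fallback above is a common pattern; on newer kernels it is usually written with the unified dma_set_mask_and_coherent() helper. The sketch below shows that variant for illustration only; chd_set_dma_mask() is a hypothetical helper, not a function from this driver.

static int chd_set_dma_mask(struct crystalhd_adp *pinfo, struct pci_dev *pdev)
{
	/* try 64-bit DMA first, then fall back to 32-bit */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		pinfo->dmabits = 64;
	else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		pinfo->dmabits = 32;
	else
		return -ENODEV;

	return 0;
}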
Example #8
/*
 * Attach to a PCI probed interface
 */
static int __devinit
pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device_node *np;
	pmac_ide_hwif_t *pmif;
	void __iomem *base;
	unsigned long rbase, rlen;
	int rc;
	struct ide_hw hw;

	np = pci_device_to_OF_node(pdev);
	if (np == NULL) {
		printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
		return -ENODEV;
	}

	pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
	if (pmif == NULL)
		return -ENOMEM;

	if (pci_enable_device(pdev)) {
		printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
				    "%s\n", np->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}
	pci_set_master(pdev);
			
	if (pci_request_regions(pdev, "Kauai ATA")) {
		printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
				"%s\n", np->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}

	pmif->mdev = NULL;
	pmif->node = np;

	rbase = pci_resource_start(pdev, 0);
	rlen = pci_resource_len(pdev, 0);

	base = ioremap(rbase, rlen);
	pmif->regbase = (unsigned long) base + 0x2000;
	pmif->dma_regs = base + 0x1000;
	pmif->kauai_fcr = base;
	pmif->irq = pdev->irq;

	pci_set_drvdata(pdev, pmif);

	memset(&hw, 0, sizeof(hw));
	pmac_ide_init_ports(&hw, pmif->regbase);
	hw.irq = pdev->irq;
	hw.dev = &pdev->dev;

	rc = pmac_ide_setup_device(pmif, &hw);
	if (rc != 0) {
		/* The interface is released to the common IDE layer */
		pci_set_drvdata(pdev, NULL);
		iounmap(base);
		pci_release_regions(pdev);
		kfree(pmif);
	}

	return rc;

out_free_pmif:
	kfree(pmif);
	return rc;
}
Example #9
static int __devinit snd_nm256_probe(struct pci_dev *pci,
				     const struct pci_device_id *pci_id)
{
	struct snd_card *card;
	struct nm256 *chip;
	int err;
	const struct snd_pci_quirk *q;

	q = snd_pci_quirk_lookup(pci, nm256_quirks);
	if (q) {
		snd_printdd(KERN_INFO "nm256: Enabled quirk for %s.\n", q->name);
		switch (q->value) {
		case NM_BLACKLISTED:
			printk(KERN_INFO "nm256: The device is blacklisted. "
			       "Loading stopped\n");
			return -ENODEV;
		case NM_RESET_WORKAROUND_2:
			reset_workaround_2 = 1;
			/* Fall-through */
		case NM_RESET_WORKAROUND:
			reset_workaround = 1;
			break;
		}
	}

	card = snd_card_new(index, id, THIS_MODULE, 0);
	if (card == NULL)
		return -ENOMEM;

	switch (pci->device) {
	case PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO:
		strcpy(card->driver, "NM256AV");
		break;
	case PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO:
		strcpy(card->driver, "NM256ZX");
		break;
	case PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO:
		strcpy(card->driver, "NM256XL+");
		break;
	default:
		snd_printk(KERN_ERR "invalid device id 0x%x\n", pci->device);
		snd_card_free(card);
		return -EINVAL;
	}

	if (vaio_hack)
		buffer_top = 0x25a800;	/* this avoids conflicts with XFree86 server */

	if (playback_bufsize < 4)
		playback_bufsize = 4;
	if (playback_bufsize > 128)
		playback_bufsize = 128;
	if (capture_bufsize < 4)
		capture_bufsize = 4;
	if (capture_bufsize > 128)
		capture_bufsize = 128;
	if ((err = snd_nm256_create(card, pci, &chip)) < 0) {
		snd_card_free(card);
		return err;
	}
	card->private_data = chip;

	if (reset_workaround) {
		snd_printdd(KERN_INFO "nm256: reset_workaround activated\n");
		chip->reset_workaround = 1;
	}

	if (reset_workaround_2) {
		snd_printdd(KERN_INFO "nm256: reset_workaround_2 activated\n");
		chip->reset_workaround_2 = 1;
	}

	if ((err = snd_nm256_pcm(chip, 0)) < 0 ||
	    (err = snd_nm256_mixer(chip)) < 0) {
		snd_card_free(card);
		return err;
	}

	sprintf(card->shortname, "NeoMagic %s", card->driver);
	sprintf(card->longname, "%s at 0x%lx & 0x%lx, irq %d",
		card->shortname,
		chip->buffer_addr, chip->cport_addr, chip->irq);

	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}

	pci_set_drvdata(pci, card);
	return 0;
}
Example #10
static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
					 const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev))
		bcma_err(bus, "PCI card detected, report problems.\n");

	/* Map MMIO */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
	bus->boardinfo.type = bus->host_pci->subsystem_device;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_pci_unmap_mmio;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}
Example #11
int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *pci_id)
{
	int idx, nm;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_printk(KERN_ERR, &pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}

	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	adapter.pci = pci_dev;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			&pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			adapter.ap_remapped_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!adapter.ap_remapped_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}

		pci.ap_mem_base[idx] = adapter.ap_remapped_mem_base[idx];
	}

	pci.pci_dev = pci_dev;
	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	adapter.index = hr.u.s.adapter_index;
	adapter.type = hr.u.s.adapter_type;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (hr.error)
		goto err;

	adapter.snd_card_asihpi = NULL;
	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[adapter.index] = adapter;
	mutex_init(&adapters[adapter.index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter.index]);

	dev_printk(KERN_INFO, &pci_dev->dev,
		"probe succeeded for ASI%04X HPI index %d\n", adapter.type,
		adapter.index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (adapter.ap_remapped_mem_base[idx]) {
			iounmap(adapter.ap_remapped_mem_base[idx]);
			adapter.ap_remapped_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
Example #12
static int orinoco_nortel_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	struct orinoco_private *priv;
	struct orinoco_pci_card *card;
	void __iomem *hermes_io, *bridge_io, *attr_io;

	err = pci_enable_device(pdev);
	if (err) {
		return err;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err) {
		goto fail_resources;
	}

	bridge_io = pci_iomap(pdev, 0, 0);
	if (!bridge_io) {
		err = -EIO;
		goto fail_map_bridge;
	}

	attr_io = pci_iomap(pdev, 1, 0);
	if (!attr_io) {
		err = -EIO;
		goto fail_map_attr;
	}

	hermes_io = pci_iomap(pdev, 2, 0);
	if (!hermes_io) {
		err = -EIO;
		goto fail_map_hermes;
	}

	/* Allocate network device */
	priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
				orinoco_nortel_cor_reset, NULL);
	if (!priv) {
		err = -ENOMEM;
		goto fail_alloc;
	}

	card = priv->card;
	card->bridge_io = bridge_io;
	card->attr_io = attr_io;

	hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);

	err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
			  DRIVER_NAME, priv);
	if (err) {
		err = -EBUSY;
		goto fail_irq;
	}

	err = orinoco_nortel_hw_init(card);
	if (err) {
		goto fail;
	}

	err = orinoco_nortel_cor_reset(priv);
	if (err) {
		goto fail;
	}

	err = orinoco_init(priv);
	if (err) {
		goto fail;
	}

	err = orinoco_if_add(priv, 0, 0, NULL);
	if (err) {
		goto fail;
	}

	pci_set_drvdata(pdev, priv);

	return 0;

 fail:
	free_irq(pdev->irq, priv);

 fail_irq:
	pci_set_drvdata(pdev, NULL);
	free_orinocodev(priv);

 fail_alloc:
	pci_iounmap(pdev, hermes_io);

 fail_map_hermes:
	pci_iounmap(pdev, attr_io);

 fail_map_attr:
	pci_iounmap(pdev, bridge_io);

 fail_map_bridge:
	pci_release_regions(pdev);

 fail_resources:
	pci_disable_device(pdev);

	return err;
}
static int spi_pci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct dw_spi_pci *dwpci;
	struct dw_spi *dws;
	int pci_bar = 0;
	int ret;

	printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
		pdev->vendor, pdev->device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
	if (!dwpci) {
		ret = -ENOMEM;
		goto err_disable;
	}

	dwpci->pdev = pdev;
	dws = &dwpci->dws;

	/* Get basic io resource and map it */
	dws->paddr = pci_resource_start(pdev, pci_bar);
	dws->iolen = pci_resource_len(pdev, pci_bar);

	ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
	if (ret)
		goto err_kfree;

	dws->regs = ioremap_nocache((unsigned long)dws->paddr,
				pci_resource_len(pdev, pci_bar));
	if (!dws->regs) {
		ret = -ENOMEM;
		goto err_release_reg;
	}

	dws->parent_dev = &pdev->dev;
	dws->bus_num = 0;
	dws->num_cs = 4;
	dws->irq = pdev->irq;

	/*
	 * Specific handling for Intel MID platforms, like dma setup,
	 * clock rate, FIFO depth.
	 */
	if (pdev->device == 0x0800) {
		ret = dw_spi_mid_init(dws);
		if (ret)
			goto err_unmap;
	}

	ret = dw_spi_add_host(dws);
	if (ret)
		goto err_unmap;

	/* PCI hook and SPI hook use the same drv data */
	pci_set_drvdata(pdev, dwpci);
	return 0;

err_unmap:
	iounmap(dws->regs);
err_release_reg:
	pci_release_region(pdev, pci_bar);
err_kfree:
	kfree(dwpci);
err_disable:
	pci_disable_device(pdev);
	return ret;
}
/* pdev is NULL for eisa */
static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
	struct request_queue *q;
	int j;

	/* 
	 * register block devices
	 * Find disks and fill in structs
	 * Get an interrupt, set the Q depth and get into /proc
	 */

	/* If this is successful it should ensure that we are the only */
	/* instance of the driver */
	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
		goto Enomem4;
	}
	hba[i]->access.set_intr_mask(hba[i], 0);
	if (request_irq(hba[i]->intr, do_ida_intr,
		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
	{
		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
				hba[i]->intr, hba[i]->devname);
		goto Enomem3;
	}
		
	for (j=0; j<NWD; j++) {
		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
		if (!ida_gendisk[i][j])
			goto Enomem2;
	}

	hba[i]->cmd_pool = pci_alloc_consistent(
		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
		&(hba[i]->cmd_pool_dhandle));
	hba[i]->cmd_pool_bits = kcalloc(
		(NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG, sizeof(unsigned long),
		GFP_KERNEL);

	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
			goto Enomem1;

	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
	printk(KERN_INFO "cpqarray: Finding drives on %s",
		hba[i]->devname);

	spin_lock_init(&hba[i]->lock);
	q = blk_init_queue(do_ida_request, &hba[i]->lock);
	if (!q)
		goto Enomem1;

	hba[i]->queue = q;
	q->queuedata = hba[i];

	getgeometry(i);
	start_fwbk(i);

	ida_procinit(i);

	if (pdev)
		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);

	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(q, SG_MAX);

	/* This is a driver limit and could be eliminated. */
	blk_queue_max_phys_segments(q, SG_MAX);
	
	init_timer(&hba[i]->timer);
	hba[i]->timer.expires = jiffies + IDA_TIMER;
	hba[i]->timer.data = (unsigned long)hba[i];
	hba[i]->timer.function = ida_timer;
	add_timer(&hba[i]->timer);

	/* Enable IRQ now that spinlock and rate limit timer are set up */
	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);

	for(j=0; j<NWD; j++) {
		struct gendisk *disk = ida_gendisk[i][j];
		drv_info_t *drv = &hba[i]->drv[j];
		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
		disk->major = COMPAQ_SMART2_MAJOR + i;
		disk->first_minor = j<<NWD_SHIFT;
		disk->fops = &ida_fops;
		if (j && !drv->nr_blks)
			continue;
		blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
		set_capacity(disk, drv->nr_blks);
		disk->queue = hba[i]->queue;
		disk->private_data = drv;
		add_disk(disk);
	}

	/* done ! */
	return(i);

Enomem1:
	nr_ctlr = i; 
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t), 
				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
Enomem2:
	while (j--) {
		put_disk(ida_gendisk[i][j]);
		ida_gendisk[i][j] = NULL;
	}
	free_irq(hba[i]->intr, hba[i]);
Enomem3:
	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
Enomem4:
	if (pdev)
		pci_set_drvdata(pdev, NULL);
	release_io_mem(hba[i]);
	free_hba(i);

	printk( KERN_ERR "cpqarray: out of memory");

	return -1;
}
Example #15
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct wil6210_priv *wil;
	struct device *dev = &pdev->dev;
	void __iomem *csr;
	struct wil_board *board = (struct wil_board *)id->driver_data;
	int rc;

	/* check HW */
	dev_info(&pdev->dev, WIL_NAME
		 " \"%s\" device found [%04x:%04x] (rev %x)\n", board->name,
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
		dev_err(&pdev->dev, "Not " WIL_NAME "? "
			"BAR0 size is %lu while expecting %lu\n",
			(ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
		return -ENODEV;
	}

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"pci_enable_device failed, retry with MSI only\n");
		/* Workaround for platforms that can't allocate IRQ:
		 * retry with MSI only
		 */
		pdev->msi_enabled = 1;
		rc = pci_enable_device(pdev);
	}
	if (rc)
		return -ENODEV;
	/* rollback to err_disable_pdev */

	rc = pci_request_region(pdev, 0, WIL_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_region failed\n");
		goto err_disable_pdev;
	}
	/* rollback to err_release_reg */

	csr = pci_ioremap_bar(pdev, 0);
	if (!csr) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		rc = -ENODEV;
		goto err_release_reg;
	}
	/* rollback to err_iounmap */
	dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);

	wil = wil_if_alloc(dev, csr);
	if (IS_ERR(wil)) {
		rc = (int)PTR_ERR(wil);
		dev_err(dev, "wil_if_alloc failed: %d\n", rc);
		goto err_iounmap;
	}
	/* rollback to if_free */

	pci_set_drvdata(pdev, wil);
	wil->pdev = pdev;
	wil->board = board;

	wil6210_clear_irq(wil);

	wil->platform_handle =
			wil_platform_init(&pdev->dev, &wil->platform_ops);

	/* FW should raise IRQ when ready */
	rc = wil_if_pcie_enable(wil);
	if (rc) {
		wil_err(wil, "Enable device failed\n");
		goto if_free;
	}
	/* rollback to bus_disable */

	rc = wil_if_add(wil);
	if (rc) {
		wil_err(wil, "wil_if_add failed: %d\n", rc);
		goto bus_disable;
	}

	wil6210_debugfs_init(wil);

	/* check FW is alive */
	wmi_echo(wil);

	return 0;

 bus_disable:
	wil_if_pcie_disable(wil);
 if_free:
	if (wil->platform_ops.uninit)
		wil->platform_ops.uninit(wil->platform_handle);
	wil_if_free(wil);
 err_iounmap:
	pci_iounmap(pdev, csr);
 err_release_reg:
	pci_release_region(pdev, 0);
 err_disable_pdev:
	pci_disable_device(pdev);

	return rc;
}
static int intel_gpio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *id)
{
	void __iomem *base;
	struct intel_mid_gpio *priv;
	u32 gpio_base;
	u32 irq_base;
	int retval;
	struct intel_mid_gpio_ddata *ddata =
				(struct intel_mid_gpio_ddata *)id->driver_data;

	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
	if (retval) {
		dev_err(&pdev->dev, "I/O memory mapping error\n");
		return retval;
	}

	base = pcim_iomap_table(pdev)[1];

	irq_base = readl(base);
	gpio_base = readl(sizeof(u32) + base);

	/* release the IO mapping, since we already get the info from bar1 */
	pcim_iounmap_regions(pdev, 1 << 1);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "can't allocate chip data\n");
		return -ENOMEM;
	}

	priv->reg_base = pcim_iomap_table(pdev)[0];
	priv->chip.label = dev_name(&pdev->dev);
	priv->chip.dev = &pdev->dev;
	priv->chip.request = intel_gpio_request;
	priv->chip.direction_input = intel_gpio_direction_input;
	priv->chip.direction_output = intel_gpio_direction_output;
	priv->chip.get = intel_gpio_get;
	priv->chip.set = intel_gpio_set;
	priv->chip.to_irq = intel_gpio_to_irq;
	priv->chip.base = gpio_base;
	priv->chip.ngpio = ddata->ngpio;
	priv->chip.can_sleep = false;
	priv->pdev = pdev;

	spin_lock_init(&priv->lock);

	priv->domain = irq_domain_add_simple(pdev->dev.of_node, ddata->ngpio,
					irq_base, &intel_gpio_irq_ops, priv);
	if (!priv->domain)
		return -ENOMEM;

	pci_set_drvdata(pdev, priv);
	retval = gpiochip_add(&priv->chip);
	if (retval) {
		dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
		return retval;
	}

	intel_mid_irq_init_hw(priv);

	irq_set_handler_data(pdev->irq, priv);
	irq_set_chained_handler(pdev->irq, intel_mid_irq_handler);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}
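Because this probe relies on managed resources (pcim_enable_device(), pcim_iomap_regions(), devm_kzalloc()), the PCI core and devres free them automatically on failure and on removal, which is why no goto-based unwinding ladder appears above. A registration sketch for such a driver might look like the following; the ID table entry is hypothetical and not taken from the original source.

static const struct pci_device_id intel_gpio_ids[] = {
	/* hypothetical entry; a real table would also set .driver_data to an
	 * intel_mid_gpio_ddata describing the chip (the probe reads ->ngpio) */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) },
	{ }
};
MODULE_DEVICE_TABLE(pci, intel_gpio_ids);

static struct pci_driver intel_gpio_driver = {
	.name		= "intel_mid_gpio",
	.id_table	= intel_gpio_ids,
	.probe		= intel_gpio_probe,
};
module_pci_driver(intel_gpio_driver);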
Example #17
static int __devinit snd_card_cs46xx_probe(struct pci_dev *pci,
					   const struct pci_device_id *pci_id)
{
	static int dev;
	struct snd_card *card;
	struct snd_cs46xx *chip;
	int err;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0);
	if (card == NULL)
		return -ENOMEM;
	if ((err = snd_cs46xx_create(card, pci,
				     external_amp[dev], thinkpad[dev],
				     &chip)) < 0) {
		snd_card_free(card);
		return err;
	}
	card->private_data = chip;
	chip->accept_valid = mmap_valid[dev];
	if ((err = snd_cs46xx_pcm(chip, 0, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if ((err = snd_cs46xx_pcm_rear(chip,1, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
	if ((err = snd_cs46xx_pcm_iec958(chip,2,NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
#endif
	if ((err = snd_cs46xx_mixer(chip, 2)) < 0) {
		snd_card_free(card);
		return err;
	}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if (chip->nr_ac97_codecs ==2) {
		if ((err = snd_cs46xx_pcm_center_lfe(chip,3,NULL)) < 0) {
			snd_card_free(card);
			return err;
		}
	}
#endif
	if ((err = snd_cs46xx_midi(chip, 0, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
	if ((err = snd_cs46xx_start_dsp(chip)) < 0) {
		snd_card_free(card);
		return err;
	}


	snd_cs46xx_gameport(chip);

	strcpy(card->driver, "CS46xx");
	strcpy(card->shortname, "Sound Fusion CS46xx");
	sprintf(card->longname, "%s at 0x%lx/0x%lx, irq %i",
		card->shortname,
		chip->ba0_addr,
		chip->ba1_addr,
		chip->irq);

	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}

	pci_set_drvdata(pci, card);
	dev++;
	return 0;
}
Example #18
static int __devinit probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct address_info *hw_config;
    unsigned long base;
    void __iomem *mem;
    unsigned long io;
    u16 map;
    u8 irq, dma8, dma16;
    int oldquiet;
    extern int sb_be_quiet;

    base = pci_resource_start(pdev, 0);
    if(base == 0UL)
        return 1;

    mem = ioremap(base, 128);
    if (!mem)
        return 1;
    map = readw(mem + 0x18);	/* Read the SMI enables */
    iounmap(mem);

    /* Map bits
    	0:1	* 0x20 + 0x200 = sb base
    	2	sb enable
    	3	adlib enable
    	5	MPU enable 0x330
    	6	MPU enable 0x300

       The other bits may be used internally so must be masked */

    io = 0x220 + 0x20 * (map & 3);

    if(map & (1<<2))
        printk(KERN_INFO "kahlua: XpressAudio at 0x%lx\n", io);
    else
        return 1;

    if(map & (1<<5))
        printk(KERN_INFO "kahlua: MPU at 0x300\n");
    else if(map & (1<<6))
        printk(KERN_INFO "kahlua: MPU at 0x330\n");

    irq = mixer_read(io, 0x80) & 0x0F;
    dma8 = mixer_read(io, 0x81);

    // printk("IRQ=%x MAP=%x DMA=%x\n", irq, map, dma8);

    if(dma8 & 0x20)
        dma16 = 5;
    else if(dma8 & 0x40)
        dma16 = 6;
    else if(dma8 & 0x80)
        dma16 = 7;
    else
    {
        printk(KERN_ERR "kahlua: No 16bit DMA enabled.\n");
        return 1;
    }

    if(dma8 & 0x01)
        dma8 = 0;
    else if(dma8 & 0x02)
        dma8 = 1;
    else if(dma8 & 0x08)
        dma8 = 3;
    else
    {
        printk(KERN_ERR "kahlua: No 8bit DMA enabled.\n");
        return 1;
    }

    if(irq & 1)
        irq = 9;
    else if(irq & 2)
        irq = 5;
    else if(irq & 4)
        irq = 7;
    else if(irq & 8)
        irq = 10;
    else
    {
        printk(KERN_ERR "kahlua: SB IRQ not set.\n");
        return 1;
    }

    printk(KERN_INFO "kahlua: XpressAudio on IRQ %d, DMA %d, %d\n",
           irq, dma8, dma16);

    hw_config = kzalloc(sizeof(struct address_info), GFP_KERNEL);
    if(hw_config == NULL)
    {
        printk(KERN_ERR "kahlua: out of memory.\n");
        return 1;
    }

    pci_set_drvdata(pdev, hw_config);

    hw_config->io_base = io;
    hw_config->irq = irq;
    hw_config->dma = dma8;
    hw_config->dma2 = dma16;
    hw_config->name = "Cyrix XpressAudio";
    hw_config->driver_use_1 = SB_NO_MIDI | SB_PCI_IRQ;

    if (!request_region(io, 16, "soundblaster"))
        goto err_out_free;

    if(sb_dsp_detect(hw_config, 0, 0, NULL)==0)
    {
        printk(KERN_ERR "kahlua: audio not responding.\n");
        release_region(io, 16);
        goto err_out_free;
    }

    oldquiet = sb_be_quiet;
    sb_be_quiet = 1;
    if(sb_dsp_init(hw_config, THIS_MODULE))
    {
        sb_be_quiet = oldquiet;
        goto err_out_free;
    }
    sb_be_quiet = oldquiet;

    return 0;

err_out_free:
    pci_set_drvdata(pdev, NULL);
    kfree(hw_config);
    return 1;
}
Example #19
/*
* Jeff: this function should be called under ioctl (rtnl_lock is acquired) while
* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
*/
int rtw_change_ifname(_adapter *padapter, const char *ifname)
{
	struct net_device *pnetdev;
	struct net_device *cur_pnetdev;
	struct rereg_nd_name_data *rereg_priv;
	int ret;

	if(!padapter)
		goto error;

	cur_pnetdev = padapter->pnetdev;
	rereg_priv = &padapter->rereg_nd_name_priv;
	
	//free the old_pnetdev
	if(rereg_priv->old_pnetdev) {
		free_netdev(rereg_priv->old_pnetdev);
		rereg_priv->old_pnetdev = NULL;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
	if(!rtnl_is_locked())
		unregister_netdev(cur_pnetdev);
	else
#endif
		unregister_netdevice(cur_pnetdev);

	#ifdef CONFIG_PROC_DEBUG
	rtw_proc_remove_one(cur_pnetdev);
	#endif //CONFIG_PROC_DEBUG

	rereg_priv->old_pnetdev = cur_pnetdev;

	pnetdev = rtw_init_netdev(padapter);
	if (!pnetdev)  {
		ret = -1;
		goto error;
	}

#ifdef CONFIG_USB_HCI

	SET_NETDEV_DEV(pnetdev, &padapter->dvobjpriv.pusbintf->dev);

	usb_set_intfdata(padapter->dvobjpriv.pusbintf, pnetdev);

#elif defined(CONFIG_PCI_HCI)

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
	SET_NETDEV_DEV(pnetdev, &padapter->dvobjpriv.ppcidev->dev);
#endif

	pci_set_drvdata(padapter->dvobjpriv.ppcidev, pnetdev);

#endif

	rtw_init_netdev_name(pnetdev, ifname);

	_rtw_memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
	if(!rtnl_is_locked())
		ret = register_netdev(pnetdev);
	else
#endif
		ret = register_netdevice(pnetdev);

	if ( ret != 0) {
		RT_TRACE(_module_hci_intfs_c_,_drv_err_,("register_netdev() failed\n"));
		goto error;
	}

	#ifdef CONFIG_PROC_DEBUG
	rtw_proc_init_one(pnetdev);
	#endif //CONFIG_PROC_DEBUG

	return 0;

error:
	
	return -1;
	
}
Example #20
static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
					    const struct pci_device_id *pci_id)
{
	static int dev;
	snd_card_t *card;
	emu10k1_t *emu;
#ifdef ENABLE_SYNTH
	snd_seq_device_t *wave = NULL;
#endif
	int err;

	if (dev >= SNDRV_CARDS)
        	return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0);
	if (card == NULL)
		return -ENOMEM;
	if (max_buffer_size[dev] < 32)
		max_buffer_size[dev] = 32;
	else if (max_buffer_size[dev] > 1024)
		max_buffer_size[dev] = 1024;
	if ((err = snd_emu10k1_create(card, pci, extin[dev], extout[dev],
				      (long)max_buffer_size[dev] * 1024 * 1024,
				      enable_ir[dev],
				      &emu)) < 0) {
		snd_card_free(card);
		return err;
	}		
	if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}		
	if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}		
	if ((err = snd_emu10k1_pcm_efx(emu, 2, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}		
	if ((err = snd_emu10k1_mixer(emu)) < 0) {
		snd_card_free(card);
		return err;
	}
	if (emu->audigy) {
		if ((err = snd_emu10k1_audigy_midi(emu)) < 0) {
			snd_card_free(card);
			return err;
		}
	} else {
		if ((err = snd_emu10k1_midi(emu)) < 0) {
			snd_card_free(card);
			return err;
		}
	}
	if ((err = snd_emu10k1_fx8010_new(emu, 0, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
#ifdef ENABLE_SYNTH
	if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH,
			       sizeof(snd_emu10k1_synth_arg_t), &wave) < 0 ||
	    wave == NULL) {
		snd_printk("can't initialize Emu10k1 wavetable synth\n");
	} else {
		snd_emu10k1_synth_arg_t *arg;
		arg = SNDRV_SEQ_DEVICE_ARGPTR(wave);
		strcpy(wave->name, "Emu-10k1 Synth");
		arg->hwptr = emu;
		arg->index = 1;
		arg->seq_ports = seq_ports[dev];
		arg->max_voices = max_synth_voices[dev];
	}
#endif
 
	if (emu->audigy && (emu->revision == 4) ) {
		strcpy(card->driver, "Audigy2");
		strcpy(card->shortname, "Sound Blaster Audigy2");
	} else if (emu->audigy) {
		strcpy(card->driver, "Audigy");
		strcpy(card->shortname, "Sound Blaster Audigy");
	} else if (emu->APS) {
		strcpy(card->driver, "E-mu APS");
		strcpy(card->shortname, "E-mu APS");
	} else {
		strcpy(card->driver, "EMU10K1");
		strcpy(card->shortname, "Sound Blaster Live!");
	}
	sprintf(card->longname, "%s (rev.%d) at 0x%lx, irq %i", card->shortname, emu->revision, emu->port, emu->irq);

	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}
	pci_set_drvdata(pci, card);
	dev++;
	return 0;
}
Example #21
static int __devinit ilo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	int devnum, minor, start, error;
	struct ilo_hwinfo *ilo_hw;

	/* find a free slot in ilo_hwdev[] for this device */
	for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
		if (ilo_hwdev[devnum] == 0) {
			ilo_hwdev[devnum] = 1;
			break;
		}
	}

	if (devnum == MAX_ILO_DEV) {
		dev_err(&pdev->dev, "Error finding free device\n");
		return -ENODEV;
	}

	/* allocate and initialize the per-device structure */
	error = -ENOMEM;
	ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
	if (!ilo_hw)
		goto out;

	ilo_hw->ilo_dev = pdev;
	spin_lock_init(&ilo_hw->alloc_lock);
	spin_lock_init(&ilo_hw->fifo_lock);
	spin_lock_init(&ilo_hw->open_lock);

	error = pci_enable_device(pdev);
	if (error)
		goto free;

	pci_set_master(pdev);

	error = pci_request_regions(pdev, ILO_NAME);
	if (error)
		goto disable;

	error = ilo_map_device(pdev, ilo_hw);
	if (error)
		goto free_regions;

	pci_set_drvdata(pdev, ilo_hw);
	clear_device(ilo_hw);

	error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
	if (error)
		goto unmap;

	ilo_enable_interrupts(ilo_hw);

	cdev_init(&ilo_hw->cdev, &ilo_fops);
	ilo_hw->cdev.owner = THIS_MODULE;
	start = devnum * MAX_CCB;
	error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
	if (error) {
		dev_err(&pdev->dev, "Could not add cdev\n");
		goto remove_isr;
	}

	for (minor = 0 ; minor < MAX_CCB; minor++) {
		struct device *dev;
		dev = device_create(ilo_class, &pdev->dev,
				    MKDEV(ilo_major, minor), NULL,
				    "hpilo!d%dccb%d", devnum, minor);
		if (IS_ERR(dev))
			dev_err(&pdev->dev, "Could not create files\n");
	}

	return 0;
remove_isr:
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
unmap:
	ilo_unmap_device(pdev, ilo_hw);
free_regions:
	pci_release_regions(pdev);
disable:
	pci_disable_device(pdev);
free:
	kfree(ilo_hw);
out:
	ilo_hwdev[devnum] = 0;
	return error;
}
Example #22
static int __devinit p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);

	err = request_firmware(&priv->firmware, "isl3886pci",
			       &priv->pdev->dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = request_firmware(&priv->firmware, "isl3886",
				       &priv->pdev->dev);
		if (err)
			goto err_free_common;
	}

	err = p54p_open(dev);
	if (err)
		goto err_free_common;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto err_free_common;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto err_free_common;

	return 0;

 err_free_common:
	release_firmware(priv->firmware);
	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	pci_set_drvdata(pdev, NULL);
	p54_free_common(dev);

 err_free_reg:
	pci_release_regions(pdev);
 err_disable_dev:
	pci_disable_device(pdev);
	return err;
}
Example #23
static INT __devinit   rt2860_probe(
    IN  struct pci_dev              *pci_dev, 
    IN  const struct pci_device_id  *pci_id)
{
	VOID 				*pAd = NULL;
	struct  net_device	*net_dev;
	PVOID				handle;
	PSTRING				print_name;
	ULONG				csr_addr;
	INT rv = 0;
	RTMP_OS_NETDEV_OP_HOOK	netDevHook;
	ULONG					OpMode;

	DBGPRINT(RT_DEBUG_TRACE, ("===> rt2860_probe\n"));

/*PCIDevInit============================================== */
	/* wake up and enable device */
	if ((rv = pci_enable_device(pci_dev))!= 0)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("Enable PCI device failed, errno=%d!\n", rv));
		return rv;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	print_name = (PSTRING)pci_name(pci_dev);
#else
	print_name = pci_dev->slot_name;
#endif /* LINUX_VERSION_CODE */

	if ((rv = pci_request_regions(pci_dev, print_name)) != 0)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("Request PCI resource failed, errno=%d!\n", rv));
		goto err_out;
	}
	
	/* map physical address to virtual address for accessing register */
	csr_addr = (unsigned long) ioremap(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0));
	if (!csr_addr)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("ioremap failed for device %s, region 0x%lX @ 0x%lX\n",
					print_name, (ULONG)pci_resource_len(pci_dev, 0), (ULONG)pci_resource_start(pci_dev, 0)));
		goto err_out_free_res;
	}
	else
	{
		DBGPRINT(RT_DEBUG_TRACE, ("%s: at 0x%lx, VA 0x%lx, IRQ %d. \n",  print_name, 
					(ULONG)pci_resource_start(pci_dev, 0), (ULONG)csr_addr, pci_dev->irq));
	}

	/* Set DMA master */
	pci_set_master(pci_dev);


/*RtmpDevInit============================================== */
	/* Allocate RTMP_ADAPTER adapter structure */
	os_alloc_mem(NULL, (UCHAR **)&handle, sizeof(struct os_cookie));
	if (handle == NULL)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("%s(): Allocate memory for os handle failed!\n", __FUNCTION__));
		goto err_out_iounmap;
	}
	memset(handle, 0, sizeof(struct os_cookie));

	((POS_COOKIE)handle)->pci_dev = pci_dev;

#ifdef OS_ABL_FUNC_SUPPORT
{
	RTMP_PCI_CONFIG PciConfig;
	PciConfig.ConfigVendorID = PCI_VENDOR_ID;
	/* get DRIVER operations */
	RTMP_DRV_OPS_FUNCTION(pRtmpDrvOps, NULL, &PciConfig, NULL);
}
#endif /* OS_ABL_FUNC_SUPPORT */

	rv = RTMPAllocAdapterBlock(handle, &pAd);	/* we may need the pci_dev when allocating the "RTMP_ADAPTER" structure */
	if (rv != NDIS_STATUS_SUCCESS) 
		goto err_out_iounmap;
	/* Here is the RTMP_ADAPTER structure with PCI-bus-specific parameters. */
	RTMP_DRIVER_PCI_CSR_SET(pAd, csr_addr);

	RTMP_DRIVER_PCIE_INIT(pAd, pci_dev);

/*NetDevInit============================================== */
	net_dev = RtmpPhyNetDevInit(pAd, &netDevHook);
	if (net_dev == NULL)
		goto err_out_free_radev;
	
	/* Here is the net_device structure with PCI-bus-specific parameters. */
	net_dev->irq = pci_dev->irq;		/* Interrupt IRQ number */
	net_dev->base_addr = csr_addr;		/* Save CSR virtual address and irq to device structure */
	pci_set_drvdata(pci_dev, net_dev);	/* Set driver data */
	

/*All done, it's time to register the net device to linux kernel. */
	/* Register this device */
#ifdef RT_CFG80211_SUPPORT
{
/*	pAd->pCfgDev = &(pci_dev->dev); */
/*	pAd->CFG80211_Register = CFG80211_Register; */
/*	RTMP_DRIVER_CFG80211_INIT(pAd, pci_dev); */

	/*
		In 2.6.32, cfg80211 registration must happen before register_netdevice();
		we cannot put the registration in rt28xx_open(), or a NULL pointer
		dereference occurs in list_add() of cfg80211_netdev_notifier_call().
	*/
	CFG80211_Register(pAd, &(pci_dev->dev), net_dev);
}
#endif /* RT_CFG80211_SUPPORT */

	RTMP_DRIVER_OP_MODE_GET(pAd, &OpMode);
	rv = RtmpOSNetDevAttach(OpMode, net_dev, &netDevHook);
	if (rv)
		goto err_out_free_netdev;


/*#ifdef KTHREAD_SUPPORT */
#ifdef PRE_ASSIGN_MAC_ADDR
	UCHAR PermanentAddress[MAC_ADDR_LEN];
	RTMP_DRIVER_MAC_ADDR_GET(pAd, &PermanentAddress[0]);
	DBGPRINT(RT_DEBUG_TRACE, ("@%s MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__, PermanentAddress[0], PermanentAddress[1],PermanentAddress[2],PermanentAddress[3],PermanentAddress[4],PermanentAddress[5]));
	/* Set up the Mac address */
	RtmpOSNetDevAddrSet(OpMode, net_dev, &PermanentAddress[0], NULL);
#endif /* PRE_ASSIGN_MAC_ADDR */

	wl_proc_init();

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2860_probe\n"));

	return 0; /* probe ok */


	/* --------------------------- ERROR HANDLE --------------------------- */
err_out_free_netdev:
	RtmpOSNetDevFree(net_dev);
	
err_out_free_radev:
	/* free RTMP_ADAPTER structure and os_cookie */
	RTMPFreeAdapter(pAd);

err_out_iounmap:
	iounmap((void *)(csr_addr));
	release_mem_region(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); 
	
err_out_free_res:
	pci_release_regions(pci_dev);
	
err_out:
	pci_disable_device(pci_dev);

	DBGPRINT(RT_DEBUG_ERROR, ("<=== rt2860_probe failed with rv = %d!\n", rv));

	return -ENODEV; /* probe fail */
}
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		CH_ERR("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			CH_ERR("%s: unable to obtain 64-bit DMA for"
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

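	/* Allocate one net_device per port; the adapter state is embedded in the first one */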
	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev->priv;
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				CH_ERR("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			init_MUTEX(&adapter->mib_mutex);
			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task, adapter);
			INIT_WORK(&adapter->stats_update_task, mac_stats_task,
				  adapter);
#ifdef work_struct
			init_timer(&adapter->stats_update_timer);
			adapter->stats_update_timer.function = mac_stats_timer;
			adapter->stats_update_timer.data =
				(unsigned long)adapter;
#endif

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->vlan_rx_register = vlan_rx_register;
			netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
			adapter->flags |= TSO_CAPABLE;
			netdev->features |= NETIF_F_TSO;
		}

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t1_start_xmit;
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) :
			sizeof(struct cpl_tx_pkt);
		netdev->get_stats = t1_get_stats;
		netdev->set_multicast_list = t1_set_rxmode;
		netdev->do_ioctl = t1_ioctl;
		netdev->change_mtu = t1_change_mtu;
		netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = t1_netpoll;
#endif
		netdev->weight = 64;

        SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			CH_WARN("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

                __set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		CH_ERR("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);
	return 0;

 out_release_adapter_res:
	t1_free_sw_modules(adapter);
 out_free_dev:
	if (adapter) {
		if (adapter->regs) iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev) {
				cxgb_proc_cleanup(adapter, proc_root_driver);
				kfree(adapter->port[i].dev);
			}
	}
	pci_release_regions(pdev);
 out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
Example #25
0
static int __devinit solo6010_pci_probe(struct pci_dev *pdev,
                                        const struct pci_device_id *id)
{
    struct solo6010_dev *solo_dev;
    int ret;
    u8 chip_id;

    solo_dev = kzalloc(sizeof(*solo_dev), GFP_KERNEL);
    if (solo_dev == NULL)
        return -ENOMEM;

    if (id->driver_data == SOLO_DEV_6010)
        dev_info(&pdev->dev, "Probing Softlogic 6010\n");
    else
        dev_info(&pdev->dev, "Probing Softlogic 6110\n");

    solo_dev->type = id->driver_data;
    solo_dev->pdev = pdev;
    spin_lock_init(&solo_dev->reg_io_lock);
    pci_set_drvdata(pdev, solo_dev);

    /* Only for during init */
    solo_dev->p2m_jiffies = msecs_to_jiffies(100);

    if ((ret = pci_enable_device(pdev)))
        goto fail_probe;

    pci_set_master(pdev);

    /* RETRY/TRDY Timeout disabled */
    pci_write_config_byte(pdev, 0x40, 0x00);
    pci_write_config_byte(pdev, 0x41, 0x00);

    if ((ret = pci_request_regions(pdev, SOLO6010_NAME)))
        goto fail_probe;

    if ((solo_dev->reg_base = pci_ioremap_bar(pdev, 0)) == NULL) {
        ret = -ENOMEM;
        goto fail_probe;
    }

    chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
              SOLO_CHIP_ID_MASK;
    switch (chip_id) {
    case 7:
        solo_dev->nr_chans = 16;
        solo_dev->nr_ext = 5;
        break;
    case 6:
        solo_dev->nr_chans = 8;
        solo_dev->nr_ext = 2;
        break;
    default:
        dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, "
                 "defaulting to 4 channels\n",
                 chip_id);
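        /* fall through to the 4-channel defaults */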
    case 5:
        solo_dev->nr_chans = 4;
        solo_dev->nr_ext = 1;
    }

    /* Disable all interrupts to start */
    solo_irq_off(solo_dev, ~0);

    /* Initial global settings */
    if (solo_dev->type == SOLO_DEV_6010) {
        solo_dev->clock_mhz = 108;
        solo_dev->sys_config = SOLO_SYS_CFG_SDRAM64BIT
                               | SOLO_SYS_CFG_INPUTDIV(25)
                               | SOLO_SYS_CFG_FEEDBACKDIV(solo_dev->clock_mhz * 2 - 2)
                               | SOLO_SYS_CFG_OUTDIV(3);
        solo_reg_write(solo_dev, SOLO_SYS_CFG, solo_dev->sys_config);
    } else {
        u32 divq, divf;

        solo_dev->clock_mhz = 135;

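        /* Derive the PLL dividers (divq/divf) from the target clock frequency */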
        if (solo_dev->clock_mhz < 125) {
            divq = 3;
            divf = (solo_dev->clock_mhz * 4) / 3 - 1;
        } else {
            divq = 2;
            divf = (solo_dev->clock_mhz * 2) / 3 - 1;
        }

        solo_reg_write(solo_dev, SOLO_PLL_CONFIG,
                       (1 << 20) | /* PLL_RANGE */
                       (8 << 15) | /* PLL_DIVR  */
                       (divq << 12 ) |
                       (divf <<  4 ) |
                       (1 <<  1)   /* PLL_FSEN */ );

        solo_dev->sys_config = SOLO_SYS_CFG_SDRAM64BIT;
    }

    solo_reg_write(solo_dev, SOLO_SYS_CFG, solo_dev->sys_config);
    solo_reg_write(solo_dev, SOLO_TIMER_CLOCK_NUM,
                   solo_dev->clock_mhz - 1);

    /* PLL locking time of 1ms */
    mdelay(1);

    ret = request_irq(pdev->irq, solo6010_isr, IRQF_SHARED, SOLO6010_NAME,
                      solo_dev);
    if (ret)
        goto fail_probe;

    /* Handle this from the start */
    solo_irq_on(solo_dev, SOLO_IRQ_PCI_ERR);

    if ((ret = solo_i2c_init(solo_dev)))
        goto fail_probe;

    /* Setup the DMA engine */
    solo_reg_write(solo_dev, SOLO_DMA_CTRL,
                   SOLO_DMA_CTRL_REFRESH_CYCLE(1) |
                   SOLO_DMA_CTRL_SDRAM_SIZE(2) |
                   SOLO_DMA_CTRL_SDRAM_CLK_INVERT |
                   SOLO_DMA_CTRL_READ_CLK_SELECT |
                   SOLO_DMA_CTRL_LATENCY(1));

    /* Undocumented register; the value differs between the 6010 and 6110 */
    solo_reg_write(solo_dev, SOLO_DMA_CTRL1,
                   solo_dev->type == SOLO_DEV_6010 ? 0x100 : 0x300);

    if (solo_dev->type != SOLO_DEV_6010) {
        solo_dev->usec_lsb = 0x3f;
        solo_set_time(solo_dev);
    }

    /* Disable watchdog */
    solo_reg_write(solo_dev, SOLO_WATCHDOG, 0);

    /* Initialize sub components */

    if ((ret = solo_p2m_init(solo_dev)))
        goto fail_probe;

    if ((ret = solo_disp_init(solo_dev)))
        goto fail_probe;

    if ((ret = solo_gpio_init(solo_dev)))
        goto fail_probe;

    if ((ret = solo_tw28_init(solo_dev)))
        goto fail_probe;

    if ((ret = solo_v4l2_init(solo_dev, video_nr)))
        goto fail_probe;

    if ((ret = solo_enc_init(solo_dev)))
        goto fail_probe;

    if ((ret = solo_enc_v4l2_init(solo_dev, video_nr)))
        goto fail_probe;

    if ((ret = solo_g723_init(solo_dev)))
        goto fail_probe;

    if ((ret = solo_sysfs_init(solo_dev)))
        goto fail_probe;

    /* Now that init is over, set this lower */
    solo_dev->p2m_jiffies = msecs_to_jiffies(20);

    return 0;

fail_probe:
    free_solo_dev(solo_dev);
    return ret;
}
Example #26
0
static int orinoco_tmd_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err = 0;
	u32 reg, addr;
	struct orinoco_private *priv = NULL;
	unsigned long pccard_ioaddr = 0;
	unsigned long pccard_iolen = 0;
	struct net_device *dev = NULL;

	err = pci_enable_device(pdev);
	if (err)
		return -EIO;

	printk(KERN_DEBUG "TMD setup\n");
	pccard_ioaddr = pci_resource_start(pdev, 2);
	pccard_iolen = pci_resource_len(pdev, 2);
	if (! request_region(pccard_ioaddr, pccard_iolen, dev_info)) {
		printk(KERN_ERR "orinoco_tmd: I/O resource at 0x%lx len 0x%lx busy\n",
			pccard_ioaddr, pccard_iolen);
		pccard_ioaddr = 0;
		err = -EBUSY;
		goto fail;
	}
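	/* Program the card's COR through BAR 1 and read it back to confirm */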
	addr = pci_resource_start(pdev, 1);
	outb(COR_VALUE, addr);
	mdelay(1);
	reg = inb(addr);
	if (reg != COR_VALUE) {
		printk(KERN_ERR "orinoco_tmd: Error setting TMD COR values %x should be %x\n", reg, COR_VALUE);
		err = -EIO;
		goto fail;
	}

	dev = alloc_orinocodev(0, NULL);
	if (! dev) {
		err = -ENOMEM;
		goto fail;
	}

	priv = dev->priv;
	dev->base_addr = pccard_ioaddr;
	SET_MODULE_OWNER(dev);

	printk(KERN_DEBUG
	       "Detected Orinoco/Prism2 TMD device at %s irq:%d, io addr:0x%lx\n",
	       pci_name(pdev), pdev->irq, pccard_ioaddr);

	hermes_struct_init(&(priv->hw), dev->base_addr,
			HERMES_IO, HERMES_16BIT_REGSPACING);
	pci_set_drvdata(pdev, dev);

	err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name,
			  dev);
	if (err) {
		printk(KERN_ERR "orinoco_tmd: Error allocating IRQ %d.\n",
		       pdev->irq);
		err = -EBUSY;
		goto fail;
	}
	dev->irq = pdev->irq;

	err = register_netdev(dev);
	if (err)
		goto fail;

	return 0;		/* succeeded */

 fail:	
	printk(KERN_DEBUG "orinoco_tmd: init_one(), FAIL!\n");

	if (dev) {
		if (dev->irq)
			free_irq(dev->irq, dev);
		
		kfree(dev);
	}

	if (pccard_ioaddr)
		release_region(pccard_ioaddr, pccard_iolen);

	pci_disable_device(pdev);

	return err;
}
Example #27
0
static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
{
	avmcard *card;
	avmctrl_info *cinfo;
	int retval;

	card = b1_alloc_card(1);
	if (!card) {
		printk(KERN_WARNING "t1pci: no memory.\n");
		retval = -ENOMEM;
		goto err;
	}

        card->dma = avmcard_dma_alloc("t1pci", pdev, 2048+128, 2048+128);
	if (!card->dma) {
		printk(KERN_WARNING "t1pci: no memory.\n");
		retval = -ENOMEM;
		goto err_free;
	}

	cinfo = card->ctrlinfo;
	sprintf(card->name, "t1pci-%x", p->port);
	card->port = p->port;
	card->irq = p->irq;
	card->membase = p->membase;
	card->cardtype = avm_t1pci;

	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
		printk(KERN_WARNING "t1pci: ports 0x%03x-0x%03x in use.\n",
		       card->port, card->port + AVMB1_PORTLEN);
		retval = -EBUSY;
		goto err_free_dma;
	}

	card->mbase = ioremap(card->membase, 64);
	if (!card->mbase) {
		printk(KERN_NOTICE "t1pci: can't remap memory at 0x%lx\n",
		       card->membase);
		retval = -EIO;
		goto err_release_region;
	}

	b1dma_reset(card);

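	/* Detect the T1 hardware; the return code distinguishes a missing card from a cable/power problem */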
	retval = t1pci_detect(card);
	if (retval != 0) {
		if (retval < 6)
			printk(KERN_NOTICE "t1pci: NO card at 0x%x (%d)\n",
			       card->port, retval);
		else
			printk(KERN_NOTICE "t1pci: card at 0x%x, but cable not connected or T1 has no power (%d)\n",
			       card->port, retval);
		retval = -EIO;
		goto err_unmap;
	}
	b1dma_reset(card);

	retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
	if (retval) {
		printk(KERN_ERR "t1pci: unable to get IRQ %d.\n", card->irq);
		retval = -EBUSY;
		goto err_unmap;
	}

	cinfo->capi_ctrl.owner         = THIS_MODULE;
	cinfo->capi_ctrl.driver_name   = "t1pci";
	cinfo->capi_ctrl.driverdata    = cinfo;
	cinfo->capi_ctrl.register_appl = b1dma_register_appl;
	cinfo->capi_ctrl.release_appl  = b1dma_release_appl;
	cinfo->capi_ctrl.send_message  = b1dma_send_message;
	cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
	cinfo->capi_ctrl.reset_ctr     = b1dma_reset_ctr;
	cinfo->capi_ctrl.procinfo      = t1pci_procinfo;
	cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc;
	strcpy(cinfo->capi_ctrl.name, card->name);

	retval = attach_capi_ctr(&cinfo->capi_ctrl);
	if (retval) {
		printk(KERN_ERR "t1pci: attach controller failed.\n");
		retval = -EBUSY;
		goto err_free_irq;
	}
	card->cardnr = cinfo->capi_ctrl.cnr;

	printk(KERN_INFO "t1pci: AVM T1 PCI at i/o %#x, irq %d, mem %#lx\n",
	       card->port, card->irq, card->membase);

	pci_set_drvdata(pdev, card);
	return 0;

 err_free_irq:
	free_irq(card->irq, card);
 err_unmap:
	iounmap(card->mbase);
 err_release_region:
	release_region(card->port, AVMB1_PORTLEN);
 err_free_dma:
	avmcard_dma_free(card->dma);
 err_free:
	b1_free_card(card);
 err:
	return retval;
}
Example #28
0
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct mei_device *dev;
    struct mei_txe_hw *hw;
    int err;
    int i;

    /* enable pci dev */
    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "failed to enable pci device.\n");
        goto end;
    }
    /* set PCI host mastering  */
    pci_set_master(pdev);
    /* pci request regions for mei driver */
    err = pci_request_regions(pdev, KBUILD_MODNAME);
    if (err) {
        dev_err(&pdev->dev, "failed to get pci regions.\n");
        goto disable_device;
    }

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
    if (err) {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev, "No suitable DMA available.\n");
            goto release_regions;
        }
    }

    /* allocates and initializes the mei dev structure */
    dev = mei_txe_dev_init(pdev);
    if (!dev) {
        err = -ENOMEM;
        goto release_regions;
    }
    hw = to_txe_hw(dev);


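    /* Try to reserve the DMA pool via ACPI; fall back to allocating it ourselves */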
    err = mei_reserver_dma_acpi(dev);
    if (err)
        err = mei_alloc_dma(dev);
    if (err)
        goto free_device;

    /* mapping  IO device memory */
    for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
        hw->mem_addr[i] = pci_iomap(pdev, i, 0);
        if (!hw->mem_addr[i]) {
            dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
            err = -ENOMEM;
            goto free_device;
        }
    }


    pci_enable_msi(pdev);

    /* clear spurious interrupts */
    mei_clear_interrupts(dev);

    /* request and enable interrupt  */
    if (pci_dev_msi_enabled(pdev))
        err = request_threaded_irq(pdev->irq,
                                   NULL,
                                   mei_txe_irq_thread_handler,
                                   IRQF_ONESHOT, KBUILD_MODNAME, dev);
    else
        err = request_threaded_irq(pdev->irq,
                                   mei_txe_irq_quick_handler,
                                   mei_txe_irq_thread_handler,
                                   IRQF_SHARED, KBUILD_MODNAME, dev);
    if (err) {
        dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                pdev->irq);
        goto free_device;
    }

    if (mei_start(dev)) {
        dev_err(&pdev->dev, "init hw failure.\n");
        err = -ENODEV;
        goto release_irq;
    }

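    /* Point the device's SATT2 window at the DMA pool (physical address and size) */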
    err = mei_txe_setup_satt2(dev,
                              dma_to_phys(&dev->pdev->dev, hw->pool_paddr), hw->pool_size);
    if (err)
        goto release_irq;


    err = mei_register(dev);
    if (err)
        goto release_irq;

    pci_set_drvdata(pdev, dev);

    hw->mdev = mei_mm_init(&dev->pdev->dev,
                           hw->pool_vaddr, hw->pool_paddr, hw->pool_size);

    if (IS_ERR_OR_NULL(hw->mdev))
        goto deregister_mei;

    pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
    pm_runtime_use_autosuspend(&pdev->dev);

    pm_runtime_mark_last_busy(&pdev->dev);

    /*
     * For HW that is not wake-capable, the runtime PM framework
     * cannot be used at the PCI device level.
     * Use domain runtime PM callbacks instead.
     */
    if (!pci_dev_run_wake(pdev))
        mei_txe_set_pm_domain(dev);

    pm_runtime_put_noidle(&pdev->dev);

    if (!nopg)
        pm_runtime_allow(&pdev->dev);

    return 0;

deregister_mei:
    mei_deregister(dev);
release_irq:

    mei_cancel_work(dev);

    /* disable interrupts */
    mei_disable_interrupts(dev);

    free_irq(pdev->irq, dev);
    pci_disable_msi(pdev);

free_device:
    if (hw->pool_release)
        hw->pool_release(hw);

    mei_txe_pci_iounmap(pdev, hw);

    kfree(dev);
release_regions:
    pci_release_regions(pdev);
disable_device:
    pci_disable_device(pdev);
end:
    dev_err(&pdev->dev, "initialization failed.\n");
    return err;
}
Example #29
0
static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
{	
	static int versionprinted;
	struct net_device *dev;
	struct net_local *tp;
	int ret;
	unsigned int pci_irq_line;
	unsigned long pci_ioaddr;
	struct card_info *cardinfo = &card_info_table[ent->driver_data];

	if (versionprinted++ == 0)
		;

	if (pci_enable_device(pdev))
		return -EIO;

	/* Remove I/O space marker in bit 0. */
	pci_irq_line = pdev->irq;
	pci_ioaddr = pci_resource_start (pdev, 0);

	/* At this point we have found a valid card. */
	dev = alloc_trdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) {
		ret = -EBUSY;
		goto err_out_trdev;
	}

	dev->base_addr	= pci_ioaddr;
	dev->irq 	= pci_irq_line;
	dev->dma	= 0;

	dev_info(&pdev->dev, "%s\n", cardinfo->name);
	dev_info(&pdev->dev, "    IO: %#4lx  IRQ: %d\n", dev->base_addr, dev->irq);
		
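	/* Read the ring station (MAC) address from the adapter's EEPROM */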
	tms_pci_read_eeprom(dev);

	dev_info(&pdev->dev, "    Ring Station Address: %pM\n", dev->dev_addr);
		
	ret = tmsdev_init(dev, &pdev->dev);
	if (ret) {
		dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
		goto err_out_region;
	}

	tp = netdev_priv(dev);
	tp->setnselout = tms_pci_setnselout_pins;
		
	tp->sifreadb = tms_pci_sifreadb;
	tp->sifreadw = tms_pci_sifreadw;
	tp->sifwriteb = tms_pci_sifwriteb;
	tp->sifwritew = tms_pci_sifwritew;
		
	memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1);

	tp->tmspriv = cardinfo;

	dev->netdev_ops = &tms380tr_netdev_ops;

	ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		goto err_out_tmsdev;

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto err_out_irq;
	
	return 0;

err_out_irq:
	free_irq(pdev->irq, dev);
err_out_tmsdev:
	pci_set_drvdata(pdev, NULL);
	tmsdev_term(dev);
err_out_region:
	release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
err_out_trdev:
	free_netdev(dev);
	return ret;
}
Example #30
0
static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct pvscsi_adapter adapter_temp;
	struct Scsi_Host *host = NULL;
	unsigned int i;
	unsigned long flags = 0;
	int error;
	u32 max_id;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	/*
	 * Let's use a temp pvscsi_adapter struct until we find the number of
	 * targets on the adapter, after that we will switch to the real
	 * allocated struct.
	 */
	adapter = &adapter_temp;
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_disable_device;
	}

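	/* Find a memory BAR large enough for the PVSCSI register space */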
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources_and_disable;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources_and_disable;
	}

	pci_set_master(pdev);

	/*
	 * Ask the device for max number of targets before deciding the
	 * default pvscsi_ring_pages value.
	 */
	max_id = pvscsi_get_max_targets(adapter);
	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);

	if (pvscsi_ring_pages == 0)
		/*
		 * Set the right default value. Up to 16 it is 8, above it is
		 * max.
		 */
		pvscsi_ring_pages = (max_id > 16) ?
			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
	printk(KERN_INFO
	       "vmw_pvscsi: setting ring_pages to %d\n",
	       pvscsi_ring_pages);

	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_release_resources_and_disable;
	}

	/*
	 * Let's use the real pvscsi_adapter struct here onwards.
	 */
	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;
	/*
	 * Copy back what we already have to the allocated adapter struct.
	 */
	adapter->rev = adapter_temp.rev;
	adapter->mmioBase = adapter_temp.mmioBase;

	spin_lock_init(&adapter->hw_lock);
	host->max_channel = 0;
	host->max_lun     = 1;
	host->max_cmd_len = 16;
	host->max_id      = max_id;

	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

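	/* Interrupt setup: prefer MSI-X, then MSI, then shared legacy INTx */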
	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
	       adapter->use_req_threshold ? "en" : "dis");

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_release_resources(adapter);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return error;

out_release_resources_and_disable:
	pvscsi_release_resources(adapter);
	goto out_disable_device;
}