static int cs5530_init_chip(void)
{
    struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;

    while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
        switch (dev->device) {
        case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
            master_0 = pci_dev_get(dev);
            break;
        case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
            cs5530_0 = pci_dev_get(dev);
            break;
        }
    }
    if (!master_0) {
        printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
        goto fail_put;
    }
    if (!cs5530_0) {
        printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
        goto fail_put;
    }

    pci_set_master(cs5530_0);
    pci_try_set_mwi(cs5530_0);

    /* Set PCI CacheLineSize to 16 bytes */
    pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);

    /* Disable trapping of UDMA register accesses (Win98 hack) */
    pci_write_config_word(cs5530_0, 0xd0, 0x5006);

    /* Bit 1 at 0x40 enables MemoryWriteAndInvalidate on the internal X-bus */
    pci_write_config_byte(master_0, 0x40, 0x1e);

    /* Set max PCI burst size (16 bytes works best) */
    pci_write_config_byte(master_0, 0x41, 0x14);

    /* Remaining settings needed to get the chip into a sane state for IDE DMA */
    pci_write_config_byte(master_0, 0x42, 0x00);
    pci_write_config_byte(master_0, 0x43, 0xc1);

    pci_dev_put(master_0);
    pci_dev_put(cs5530_0);
    return 0;
fail_put:
    if (master_0)
        pci_dev_put(master_0);
    if (cs5530_0)
        pci_dev_put(cs5530_0);
    return -ENODEV;
}
Example #2
int vpci_bus_init(void)
{
	struct pci_dev *pcidev = NULL;

	sysdata = kzalloc(sizeof(void *), GFP_KERNEL);
	if (!sysdata)
		return -ENOMEM;

	vbus = pci_scan_bus_parented(NULL, 2, &vpci_ops, sysdata);
	/* alternatively: vbus = pci_create_root_bus(NULL, 2, &vpci_ops, sysdata, NULL); */
	if (vbus == NULL) {
		kfree(sysdata);
		return -EINVAL;
	}

	if (pci_register_driver(&vpci_vdev_driver) < 0) {
		pci_remove_bus(vbus);
		vbus = NULL;
		return -EINVAL;
	}

	pcidev = pci_scan_single_device(vbus, 0);
	if (pcidev == NULL)
		return 0;

	/* hold a reference to the scanned device before adding it to the bus */
	pci_dev_get(pcidev);
	pci_bus_add_devices(vbus);

	return 0;
}
Example #3
void mite_init(void)
{
	struct pci_dev *pcidev;
	struct mite_struct *mite;

	for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
		pcidev != NULL;
		pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
		if (pcidev->vendor == PCI_VENDOR_ID_NATINST) {
			unsigned i;

			mite = kzalloc(sizeof(*mite), GFP_KERNEL);
			if (!mite) {
				printk("mite: allocation failed\n");
				pci_dev_put(pcidev);
				return;
			}
			spin_lock_init(&mite->lock);
			mite->pcidev = pci_dev_get(pcidev);
			for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
				mite->channels[i].mite = mite;
				mite->channels[i].channel = i;
				mite->channels[i].done = 1;
			}
			mite->next = mite_devices;
			mite_devices = mite;
		}
	}
}
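The loop above takes one reference per National Instruments device (pci_dev_get() into mite->pcidev) and chains the entries onto the global mite_devices list. A minimal teardown sketch, assuming that same list, would balance each reference with pci_dev_put() before freeing the node; mite_cleanup() below is illustrative and not part of the excerpt above.

void mite_cleanup(void)
{
	struct mite_struct *mite, *next;

	for (mite = mite_devices; mite; mite = next) {
		next = mite->next;
		pci_dev_put(mite->pcidev);	/* drop the reference taken in mite_init() */
		kfree(mite);
	}
	mite_devices = NULL;
}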
Example #4
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
		struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
	static const u8 duplex[] = { 0, 1, 2, 1, 2, 2 };
	static const u16 speed[] = { 0, 10, 10, 100, 100, 1000 };

	struct et131x_adapter *etdev;

	/* Setup the fundamental net_device and private adapter structure elements  */
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Allocate private adapter struct and copy in relevant information */
	etdev = netdev_priv(netdev);
	etdev->pdev = pci_dev_get(pdev);
	etdev->netdev = netdev;

	/* Do the same for the netdev struct */
	netdev->irq = pdev->irq;
	netdev->base_addr = pci_resource_start(pdev, 0);

	/* Initialize spinlocks here */
	spin_lock_init(&etdev->Lock);
	spin_lock_init(&etdev->TCBSendQLock);
	spin_lock_init(&etdev->TCBReadyQLock);
	spin_lock_init(&etdev->send_hw_lock);
	spin_lock_init(&etdev->rcv_lock);
	spin_lock_init(&etdev->RcvPendLock);
	spin_lock_init(&etdev->FbrLock);
	spin_lock_init(&etdev->PHYLock);

	/* Parse configuration parameters into the private adapter struct */
	if (et131x_speed_set)
		dev_info(&etdev->pdev->dev,
			"Speed set manually to : %d\n", et131x_speed_set);

	etdev->SpeedDuplex = et131x_speed_set;
	etdev->RegistryJumboPacket = 1514;	/* 1514-9216 */

	/* Set the MAC address to a default */
	memcpy(etdev->addr, default_mac, ETH_ALEN);

	/* Decode SpeedDuplex
	 *
	 * Set up as if we are auto negotiating always and then change if we
	 * go into force mode
	 *
	 * If we are the 10/100 device, and gigabit is somehow requested then
	 * knock it down to 100 full.
	 */
	if (etdev->pdev->device == ET131X_PCI_DEVICE_ID_FAST &&
	    etdev->SpeedDuplex == 5)
		etdev->SpeedDuplex = 4;

	etdev->AiForceSpeed = speed[etdev->SpeedDuplex];
	etdev->AiForceDpx = duplex[etdev->SpeedDuplex];	/* Auto FDX */

	return etdev;
}
Example #5
static int pmc_probe(struct pci_dev *pdev,
			  const struct pci_device_id *unused)
{
	struct pmc_dev *pmc;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "error: could not enable device\n");
		goto err_enable_device;
	}

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "error: could not request PCI region\n");
		goto err_request_regions;
	}

	pmc = devm_kzalloc(&pdev->dev, sizeof(struct pmc_dev), GFP_KERNEL);
	if (!pmc) {
		ret = -ENOMEM;
		goto err_devm_kzalloc;
	}

	pmc->pdev = pci_dev_get(pdev);

	pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
	pmc->base_addr &= PMC_BASE_ADDR_MASK;

	pmc->regmap = devm_ioremap_nocache(&pdev->dev,
		pmc->base_addr, PMC_MMIO_REG_LEN);
	if (!pmc->regmap) {
		dev_err(&pdev->dev, "error: ioremap failed\n");
		ret = -ENOMEM;
		goto err_devm_ioremap;
	}
	pci_set_drvdata(pdev, pmc);
#ifdef CONFIG_DEBUG_FS
	pmc_dbgfs_register(pmc);
#endif /* CONFIG_DEBUG_FS */

	/* Install power off function */
	pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr);
	acpi_base_addr &= ACPI_BASE_ADDR_MASK;
	if (acpi_base_addr != 0 && pm_power_off == NULL)
		pm_power_off = pmc_power_off;
	return 0;
err_devm_ioremap:
	pci_dev_put(pdev);
err_devm_kzalloc:
	pci_release_regions(pdev);
err_request_regions:
	pci_disable_device(pdev);
err_enable_device:
	dev_err(&pdev->dev, "error: probe failed\n");
	return ret;
}
Example #6
static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct amd8131_dev_info *dev_info;

	for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
		dev_info++)
		if (dev_info->devfn == dev->devfn)
			break;

	if (dev_info->inst == NO_BRIDGE) /* should never happen */
		return -ENODEV;

	/* Four bridges share this PCI ID, so take a reference with pci_dev_get()
	 * rather than looking the device up again with pci_get_device(). */
	dev_info->dev = pci_dev_get(dev);

	if (pci_enable_device(dev_info->dev)) {
		pci_dev_put(dev_info->dev);
		printk(KERN_ERR "failed to enable:"
			"vendor %x, device %x, devfn %x, name %s\n",
			PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
			dev_info->devfn, dev_info->ctl_name);
		return -ENODEV;
	}

	/* Reuse the existing edac_pci_ctl_info instead of allocating a private one */
	dev_info->edac_idx = edac_pci_alloc_index();
	dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
	if (!dev_info->edac_dev)
		return -ENOMEM;

	dev_info->edac_dev->pvt_info = dev_info;
	dev_info->edac_dev->dev = &dev_info->dev->dev;
	dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
	dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);

	if (edac_op_state == EDAC_OPSTATE_POLL)
		dev_info->edac_dev->edac_check = amd8131_chipset.check;

	if (amd8131_chipset.init)
		amd8131_chipset.init(dev_info);

	if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
		printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
			dev_info->ctl_name);
		edac_pci_free_ctl_info(dev_info->edac_dev);
		return -ENODEV;
	}

	printk(KERN_INFO "added one device on AMD8131 "
		"vendor %x, device %x, devfn %x, name %s\n",
		PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
		dev_info->devfn, dev_info->ctl_name);

	return 0;
}
Example #7
bm_pci_device_t* bm_pci_alloc(struct pci_dev* pdev)
{
	bm_pci_device_t* pci = kzalloc(sizeof(bm_pci_device_t), GFP_KERNEL);
	if (!pci)
		return NULL;

	pci->pdev = pci_dev_get(pdev);
	kref_init(&pci->ref);
	return pci;
}
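bm_pci_alloc() couples a kref with the pci_dev reference, so a matching release path has two halves: drop the kref, and in its release callback drop the device reference before freeing the wrapper. The sketch below is hypothetical (bm_pci_free() and bm_pci_release() are not part of the excerpt) but shows where the balancing pci_dev_put() belongs.

static void bm_pci_release(struct kref *ref)
{
	bm_pci_device_t *pci = container_of(ref, bm_pci_device_t, ref);

	pci_dev_put(pci->pdev);	/* balance pci_dev_get() in bm_pci_alloc() */
	kfree(pci);
}

void bm_pci_free(bm_pci_device_t *pci)
{
	if (pci)
		kref_put(&pci->ref, bm_pci_release);
}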
Example #8
struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
				struct pci_dev *from)
{
	struct pci_dev *pdev;

	pci_dev_get(from);
	pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
	pci_dev_put(pdev);
	return pdev;
}
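This compatibility wrapper also shows why pci_find_device() is considered unsafe: the reference returned by pci_get_subsys() is dropped immediately, so the caller receives a pointer it does not own. A minimal sketch of the preferred pattern uses the reference-counted iterator directly (the vendor/device IDs below are placeholders):

static struct pci_dev *find_my_device(void)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() drops the reference on the device passed back in
	 * and holds one on the device it returns. */
	while ((pdev = pci_get_device(0x8086, 0x1234, pdev)) != NULL) {
		if (pdev->revision >= 0x10)
			return pdev;	/* caller owns this reference; pci_dev_put() when done */
	}
	return NULL;	/* loop ran to completion, nothing left to put */
}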
Example #9
File: drm_drv.c  Project: Ionic/nx-libs
/**
 * Module initialization. Called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes an array of drm_device structures, and attempts to
 * initialize all available devices, using consecutive minors, registering the
 * stubs and initializing the AGP device.
 *
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_init(struct drm_driver *driver,
		       struct pci_device_id *pciidlist)
{
	struct pci_dev *pdev;
	struct pci_device_id *pid;
	int rc, i;

	DRM_DEBUG("\n");

	for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
		pid = &pciidlist[i];

		pdev = NULL;
		/* pass back in pdev to account for multiple identical cards */
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev))) {
			/* is there already a driver loaded, or (short circuit saves work) */
			/* does something like VesaFB have control of the memory region? */
			if (pci_dev_driver(pdev)
			    || pci_request_regions(pdev, "DRM scan")) {
				/* go into stealth mode */
				drm_fb_loaded = 1;
				pci_dev_put(pdev);
				break;
			}
			/* no fbdev or vesadev, put things back and wait for normal probe */
			pci_release_regions(pdev);
		}
	}

	if (!drm_fb_loaded)
		pci_register_driver(&driver->pci_driver);
	else {
		for (i = 0; pciidlist[i].vendor != 0; i++) {
			pid = &pciidlist[i];

			pdev = NULL;
			/* pass back in pdev to account for multiple identical cards */
			while ((pdev =
				pci_get_subsys(pid->vendor, pid->device,
					       pid->subvendor, pid->subdevice,
					       pdev))) {
				/* stealth mode requires a manual probe */
				pci_dev_get(pdev);
				if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
					pci_dev_put(pdev);
					return rc;
				}
			}
		}
		DRM_INFO("Used old pci detect: framebuffer loaded\n");
	}
	return 0;
}
Example #10
/* returns count (>= 0), or negative on error */
static int __devinit i3000_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int rc;

	debugf0("MC: %s()\n", __func__);

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i3000_probe1(pdev, ent->driver_data);
	if (mci_pdev == NULL)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}
Example #11
File: sst_pci.c  Project: 020gzh/linux
/*
 * intel_sst_probe - PCI probe function
 *
 * @pci:	PCI device structure
 * @pci_id: PCI device ID structure
 *
 */
static int intel_sst_probe(struct pci_dev *pci,
			const struct pci_device_id *pci_id)
{
	int ret = 0;
	struct intel_sst_drv *sst_drv_ctx;
	struct sst_platform_info *sst_pdata = pci->dev.platform_data;

	dev_dbg(&pci->dev, "Probe for DID %x\n", pci->device);
	ret = sst_alloc_drv_context(&sst_drv_ctx, &pci->dev, pci->device);
	if (ret < 0)
		return ret;

	sst_drv_ctx->pdata = sst_pdata;
	sst_drv_ctx->irq_num = pci->irq;
	snprintf(sst_drv_ctx->firmware_name, sizeof(sst_drv_ctx->firmware_name),
			"%s%04x%s", "fw_sst_",
			sst_drv_ctx->dev_id, ".bin");

	ret = sst_context_init(sst_drv_ctx);
	if (ret < 0)
		return ret;

	/* Init the device */
	ret = pcim_enable_device(pci);
	if (ret) {
		dev_err(sst_drv_ctx->dev,
			"device can't be enabled. Returned err: %d\n", ret);
		goto do_free_drv_ctx;
	}
	sst_drv_ctx->pci = pci_dev_get(pci);
	ret = sst_platform_get_resources(sst_drv_ctx);
	if (ret < 0)
		goto do_free_drv_ctx;

	pci_set_drvdata(pci, sst_drv_ctx);
	sst_configure_runtime_pm(sst_drv_ctx);

	return ret;

do_free_drv_ctx:
	sst_context_cleanup(sst_drv_ctx);
	dev_err(sst_drv_ctx->dev, "Probe failed with %d\n", ret);
	return ret;
}
Example #12
/**
 *	rar_probe		-	PCI probe callback
 *	@dev: PCI device
 *	@id: matching entry in the match table
 *
 *	A RAR device has been discovered. Initialise it and if successful
 *	process any pending callbacks that can now be completed.
 */
static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int error;
	struct rar_device *rar;

	dev_dbg(&dev->dev, "PCI probe starting\n");

	rar = alloc_rar_device();
	if (rar == NULL)
		return -EBUSY;

	/* Enable the device */
	error = pci_enable_device(dev);
	if (error) {
		dev_err(&dev->dev,
			"Error enabling RAR register PCI device\n");
		goto end_function;
	}

	/* Fill in the rar_device structure */
	rar->rar_dev = pci_dev_get(dev);
	pci_set_drvdata(dev, rar);

	/*
	 * Initialize the RAR parameters, which have to be retrieved
	 * via the message bus interface.
	 */
	error = init_rar_params(rar);
	if (error) {
		pci_disable_device(dev);
		dev_err(&dev->dev, "Error retrieving RAR addresses\n");
		goto end_function;
	}
	/* now call anyone who has registered (using callbacks) */
	rar_callback(rar);
	return 0;
end_function:
	free_rar_device(rar);
	return error;
}
Example #13
File: mite.c  Project: 9072997/wikireader
void mite_init(void)
{
	struct pci_dev *pcidev;
	struct mite_struct *mite;

	for(pcidev = pci_get_device(PCI_VENDOR_ID_NATINST, PCI_ANY_ID,NULL);
		pcidev;
		pcidev = pci_get_device(PCI_VENDOR_ID_NATINST, PCI_ANY_ID, pcidev))
	{
		mite=kmalloc(sizeof(*mite),GFP_KERNEL);
		if(!mite){
			printk("mite: allocation failed\n");
			return;
		}
		memset(mite,0,sizeof(*mite));

		mite->pcidev=pcidev;
		pci_dev_get(mite->pcidev);
		mite->next=mite_devices;
		mite_devices=mite;
	}
}
Example #14
/**
 * Module initialization. Called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes an array of drm_device structures, and attempts to
 * initialize all available devices, using consecutive minors, registering the
 * stubs and initializing the AGP device.
 * 
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_init( struct drm_driver *driver )
{
	struct pci_dev *pdev = NULL;
	struct pci_device_id *pid;
	int i;

	DRM_DEBUG( "\n" );

	drm_mem_init();

	for (i=0; driver->pci_driver.id_table[i].vendor != 0; i++) {
		pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
		
		pdev=NULL;
		/* pass back in pdev to account for multiple identical cards */		
		while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev)) != NULL) {
			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_dev(pdev, pid, driver);
		}
	}
	return 0;
}
Example #15
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
		EFX_ERR(efx, "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			EFX_ERR(efx, "Falcon rev A0 not supported\n");
			goto fail1;
		}
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			EFX_ERR(efx, "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		EFX_ERR(efx, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
		(u64)efx->irq_status.dma_addr,
		efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc)
		goto fail5;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail6;
	}

	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	falcon_remove_spi_devices(efx);
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
Example #16
File: pata_cs5530.c  Project: ivucica/linux
static int cs5530_init_chip(void)
{
	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;

	while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
		switch (dev->device) {
			case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
				master_0 = pci_dev_get(dev);
				break;
			case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
				cs5530_0 = pci_dev_get(dev);
				break;
		}
	}
	if (!master_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
		goto fail_put;
	}
	if (!cs5530_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
		goto fail_put;
	}

	pci_set_master(cs5530_0);
	pci_set_mwi(cs5530_0);

	/*
	 * Set PCI CacheLineSize to 16-bytes:
	 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
	 *
	 * Note: This value is constant because the 5530 is only a Geode companion
	 */

	pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);

	/*
	 * Disable trapping of UDMA register accesses (Win98 hack):
	 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
	 */

	pci_write_config_word(cs5530_0, 0xd0, 0x5006);

	/*
	 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
	 * The other settings are what is necessary to get the register
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x40, 0x1e);

	/*
	 * Set max PCI burst size (16-bytes seems to work best):
	 *	   16bytes: set bit-1 at 0x41 (reg value of 0x16)
	 *	all others: clear bit-1 at 0x41, and do:
	 *	  128bytes: OR 0x00 at 0x41
	 *	  256bytes: OR 0x04 at 0x41
	 *	  512bytes: OR 0x08 at 0x41
	 *	 1024bytes: OR 0x0c at 0x41
	 */

	pci_write_config_byte(master_0, 0x41, 0x14);

	/*
	 * These settings are necessary to get the chip
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x42, 0x00);
	pci_write_config_byte(master_0, 0x43, 0xc1);

	pci_dev_put(master_0);
	pci_dev_put(cs5530_0);
	return 0;
fail_put:
	if (master_0)
		pci_dev_put(master_0);
	if (cs5530_0)
		pci_dev_put(cs5530_0);
	return -ENODEV;
}
Example #17
File: p54pci.c  Project: 383530895/linux
static int p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	p54_free_common(dev);

 err_free_reg:
	pci_release_regions(pdev);
 err_disable_dev:
	pci_disable_device(pdev);
	pci_dev_put(pdev);
	return err;
}
Example #18
int __devinit et131x_pci_setup(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	int result = 0;
	int pm_cap;
	bool pci_using_dac;
	struct net_device *netdev = NULL;
	struct et131x_adapter *adapter = NULL;

	/* Enable the device via the PCI subsystem */
	result = pci_enable_device(pdev);
	if (result != 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		result = -ENODEV;
		goto out;
	}

	result = pci_request_regions(pdev, DRIVER_NAME);
	if (result != 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	/* Enable PCI bus mastering */
	pci_set_master(pdev);

	/* Query PCI for Power Mgmt Capabilities
	 *
	 * NOTE: Now reading PowerMgmt in another location; is this still
	 * needed?
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev, "Cannot find Power Management capabilities\n");
		result = -EIO;
		goto err_release_res;
	}

	/* Check the DMA addressing support of this device */
	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
		pci_using_dac = true;

		result =
		    pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
		if (result != 0) {
			dev_err(&pdev->dev,
				  "Unable to obtain 64 bit DMA for consistent allocations\n");
			goto err_release_res;
		}
	} else if (!pci_set_dma_mask(pdev, 0xffffffffULL)) {
		pci_using_dac = false;
	} else {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		result = -EIO;
		goto err_release_res;
	}

	/* Allocate netdev and private adapter structs */
	netdev = et131x_device_alloc();
	if (netdev == NULL) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		result = -ENOMEM;
		goto err_release_res;
	}

	/* Setup the fundamental net_device and private adapter structure elements  */
	SET_NETDEV_DEV(netdev, &pdev->dev);
	/*
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
	}
	*/

	/*
	 * NOTE - Turn this on when we're ready to deal with SG-DMA
	 *
	 * NOTE: According to "Linux Device Drivers", 3rd ed, Rubini et al,
	 * if checksumming is not performed in HW, then the kernel will not
	 * use SG.
	 * From pp 510-511:
	 *
	 * "Note that the kernel does not perform scatter/gather I/O to your
	 * device if it does not also provide some form of checksumming as
	 * well. The reason is that, if the kernel has to make a pass over a
	 * fragmented ("nonlinear") packet to calculate the checksum, it
	 * might as well copy the data and coalesce the packet at the same
	 * time."
	 *
	 * This has been verified by setting the flags below and still not
	 * receiving a scattered buffer from the network stack, so leave it
	 * off until checksums are calculated in HW.
	 */
	/* netdev->features |= NETIF_F_SG; */
	/* netdev->features |= NETIF_F_NO_CSUM; */
	/* netdev->features |= NETIF_F_LLTX; */

	/* Allocate private adapter struct and copy in relevant information */
	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	/* Do the same for the netdev struct */
	netdev->irq = pdev->irq;
	netdev->base_addr = pdev->resource[0].start;

	/* Initialize spinlocks here */
	spin_lock_init(&adapter->Lock);
	spin_lock_init(&adapter->TCBSendQLock);
	spin_lock_init(&adapter->TCBReadyQLock);
	spin_lock_init(&adapter->SendHWLock);
	spin_lock_init(&adapter->SendWaitLock);
	spin_lock_init(&adapter->RcvLock);
	spin_lock_init(&adapter->RcvPendLock);
	spin_lock_init(&adapter->FbrLock);
	spin_lock_init(&adapter->PHYLock);

	/* Parse configuration parameters into the private adapter struct */
	et131x_config_parse(adapter);

	/* Find the physical adapter
	 *
	 * NOTE: This is the equivalent of the MpFindAdapter() routine; can we
	 *       lump its init with the device specific init below into a
	 *       single init function?
	 */
	/* while (et131x_find_adapter(adapter, pdev) != 0); */
	et131x_find_adapter(adapter, pdev);

	/* Map the bus-relative registers to system virtual memory */

	adapter->regs = ioremap_nocache(pci_resource_start(pdev, 0),
					      pci_resource_len(pdev, 0));
	if (adapter->regs == NULL) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		result = -ENOMEM;
		goto err_free_dev;
	}

	/* Perform device-specific initialization here (See code below) */

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT,  &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	/* Init send data structures */
	et131x_init_send(adapter);

	/* Register the interrupt
	 *
	 * NOTE - This is being done in the open routine, where most other
	 *         Linux drivers set up IRQ handlers. Make sure device
	 *         interrupts are not turned on before the IRQ is registered!!
	 *
	 *         What we will do here is setup the task structure for the
	 *         ISR's deferred handler
	 */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Determine MAC Address, and copy into the net_device struct */
	et131x_setup_hardware_properties(adapter);

	memcpy(netdev->dev_addr, adapter->CurrentAddress, ETH_ALEN);

	/* Setup et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* Create a timer to count errors received by the NIC */
	init_timer(&adapter->ErrorTimer);

	adapter->ErrorTimer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
	adapter->ErrorTimer.function = et131x_error_timer_handler;
	adapter->ErrorTimer.data = (unsigned long)adapter;

	/* Initialize link state */
	et131x_link_detection_handler((unsigned long)adapter);

	/* Initialize variable for counting how long we do not have
	 * link status */
	adapter->PoMgmt.TransPhyComaModeOnBoot = 0;

	/* We can enable interrupts now
	 *
	 *  NOTE - Because registration of interrupt handler is done in the
	 *         device's open(), defer enabling device interrupts to that
	 *         point
	 */

	/* Register the net_device struct with the Linux network layer */
	result = register_netdev(netdev);
	if (result != 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_mem_free;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);

	pci_save_state(adapter->pdev);

out:
	return result;

err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}
Example #19
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			dev_err(&pdev->dev, "unsupported Serverworks chipset "
				"[%04x/%04x]\n", pdev->vendor, pdev->device);
		return -ENODEV;
	}

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
			PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		dev_info(&pdev->dev, "can't find secondary device\n");
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit aperture address, "
				 "but top bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit MMIO address, but top "
				 "bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pci_dev_get(pdev);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Example #20
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	efx->timer_quantum_ns = 4968;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
Example #21
/*
* intel_sst_probe - PCI probe function
*
* @pci:	PCI device structure
* @pci_id: PCI device ID structure
*
* This function is called by OS when a device is found
* This enables the device, interrupt etc
*/
static int __devinit intel_sst_probe(struct pci_dev *pci,
			const struct pci_device_id *pci_id)
{
	int i, ret = 0;

	pr_debug("Probe for DID %x\n", pci->device);
	mutex_lock(&drv_ctx_lock);
	if (sst_drv_ctx) {
		pr_err("Only one sst handle is supported\n");
		mutex_unlock(&drv_ctx_lock);
		return -EBUSY;
	}

	sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
	if (!sst_drv_ctx) {
		pr_err("malloc fail\n");
		mutex_unlock(&drv_ctx_lock);
		return -ENOMEM;
	}
	mutex_unlock(&drv_ctx_lock);

	sst_drv_ctx->pci_id = pci->device;

	mutex_init(&sst_drv_ctx->stream_lock);
	mutex_init(&sst_drv_ctx->sst_lock);
	sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;

	sst_drv_ctx->stream_cnt = 0;
	sst_drv_ctx->encoded_cnt = 0;
	sst_drv_ctx->am_cnt = 0;
	sst_drv_ctx->pb_streams = 0;
	sst_drv_ctx->cp_streams = 0;
	sst_drv_ctx->unique_id = 0;
	sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;

	INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
	INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
	INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
	INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
	INIT_WORK(&sst_drv_ctx->mad_ops.wq, sst_process_mad_ops);
	init_waitqueue_head(&sst_drv_ctx->wait_queue);

	sst_drv_ctx->mad_wq = create_workqueue("sst_mad_wq");
	if (!sst_drv_ctx->mad_wq)
		goto do_free_drv_ctx;
	sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
	if (!sst_drv_ctx->post_msg_wq)
		goto free_mad_wq;
	sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wqq");
	if (!sst_drv_ctx->process_msg_wq)
		goto free_post_msg_wq;
	sst_drv_ctx->process_reply_wq = create_workqueue("sst_proces_reply_wq");
	if (!sst_drv_ctx->process_reply_wq)
		goto free_process_msg_wq;

	for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
		sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
		sst_drv_ctx->alloc_block[i].ops_block.condition = false;
	}
	spin_lock_init(&sst_drv_ctx->list_spin_lock);

	sst_drv_ctx->max_streams = pci_id->driver_data;
	pr_debug("Got drv data max stream %d\n",
				sst_drv_ctx->max_streams);
	for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
		struct stream_info *stream = &sst_drv_ctx->streams[i];
		INIT_LIST_HEAD(&stream->bufs);
		mutex_init(&stream->lock);
		spin_lock_init(&stream->pcm_lock);
	}
	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
		sst_drv_ctx->mmap_mem = NULL;
		sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
		while (sst_drv_ctx->mmap_len > 0) {
			sst_drv_ctx->mmap_mem =
				kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
			if (sst_drv_ctx->mmap_mem) {
				pr_debug("Got memory %p size 0x%x\n",
					sst_drv_ctx->mmap_mem,
					sst_drv_ctx->mmap_len);
				break;
			}
			if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
				pr_err("mem alloc fail...abort!!\n");
				ret = -ENOMEM;
				goto free_process_reply_wq;
			}
			sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
			pr_debug("mem alloc failed...trying %d\n",
						sst_drv_ctx->mmap_len);
		}
	}

	/* Init the device */
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("device can't be enabled\n");
		goto do_free_mem;
	}
	sst_drv_ctx->pci = pci_dev_get(pci);
	ret = pci_request_regions(pci, SST_DRV_NAME);
	if (ret)
		goto do_disable_device;
	/* map registers */
	/* SST Shim */
	sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
	sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
	if (!sst_drv_ctx->shim)
		goto do_release_regions;
	pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);

	/* Shared SRAM */
	sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
	if (!sst_drv_ctx->mailbox)
		goto do_unmap_shim;
	pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);

	/* IRAM */
	sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
	if (!sst_drv_ctx->iram)
		goto do_unmap_sram;
	pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);

	/* DRAM */
	sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
	if (!sst_drv_ctx->dram)
		goto do_unmap_iram;
	pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);

	mutex_lock(&sst_drv_ctx->sst_lock);
	sst_drv_ctx->sst_state = SST_UN_INIT;
	mutex_unlock(&sst_drv_ctx->sst_lock);
	/* Register the ISR */
	ret = request_irq(pci->irq, intel_sst_interrupt,
		IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
	if (ret)
		goto do_unmap_dram;
	pr_debug("Registered IRQ 0x%x\n", pci->irq);

	/*Register LPE Control as misc driver*/
	ret = misc_register(&lpe_ctrl);
	if (ret) {
		pr_err("couldn't register control device\n");
		goto do_free_irq;
	}

	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
		ret = misc_register(&lpe_dev);
		if (ret) {
			pr_err("couldn't register LPE device\n");
			goto do_free_misc;
		}
	} else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
		u32 csr;

		/*allocate mem for fw context save during suspend*/
		sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
		if (!sst_drv_ctx->fw_cntx) {
			ret = -ENOMEM;
			goto do_free_misc;
		}
		/*setting zero as that is valid mem to restore*/
		sst_drv_ctx->fw_cntx_size = 0;

		/*set lpe start clock and ram size*/
		csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
		csr |= 0x30060; /*remove the clock ratio after fw fix*/
		sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);
	}
	sst_drv_ctx->lpe_stalled = 0;
	pci_set_drvdata(pci, sst_drv_ctx);
	pm_runtime_allow(&pci->dev);
	pm_runtime_put_noidle(&pci->dev);
	pr_debug("...successfully done!!!\n");
	return ret;

do_free_misc:
	misc_deregister(&lpe_ctrl);
do_free_irq:
	free_irq(pci->irq, sst_drv_ctx);
do_unmap_dram:
	iounmap(sst_drv_ctx->dram);
do_unmap_iram:
	iounmap(sst_drv_ctx->iram);
do_unmap_sram:
	iounmap(sst_drv_ctx->mailbox);
do_unmap_shim:
	iounmap(sst_drv_ctx->shim);
do_release_regions:
	pci_release_regions(pci);
do_disable_device:
	pci_disable_device(pci);
do_free_mem:
	kfree(sst_drv_ctx->mmap_mem);
free_process_reply_wq:
	destroy_workqueue(sst_drv_ctx->process_reply_wq);
free_process_msg_wq:
	destroy_workqueue(sst_drv_ctx->process_msg_wq);
free_post_msg_wq:
	destroy_workqueue(sst_drv_ctx->post_msg_wq);
free_mad_wq:
	destroy_workqueue(sst_drv_ctx->mad_wq);
do_free_drv_ctx:
	kfree(sst_drv_ctx);
	sst_drv_ctx = NULL;
	pr_err("Probe failed with %d\n", ret);
	return ret;
}
Example #22
/*
 * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID,
 * so amd8131_probe() would be called by kernel 4 times, with different
 * address of pci_dev for each of them each time.
 */
static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct amd8131_dev_info *dev_info;

	for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
		dev_info++)
		if (dev_info->devfn == dev->devfn)
			break;

	if (dev_info->inst == NO_BRIDGE) /* should never happen */
		return -ENODEV;

	/*
	 * We can't call pci_get_device() as we are used to do because
	 * there are 4 of them but pci_dev_get() instead.
	 */
	dev_info->dev = pci_dev_get(dev);

	if (pci_enable_device(dev_info->dev)) {
		pci_dev_put(dev_info->dev);
		printk(KERN_ERR "failed to enable:"
			"vendor %x, device %x, devfn %x, name %s\n",
			PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
			dev_info->devfn, dev_info->ctl_name);
		return -ENODEV;
	}

	/*
	 * we do not allocate extra private structure for
	 * edac_pci_ctl_info, but make use of existing
	 * one instead.
	 */
	dev_info->edac_idx = edac_pci_alloc_index();
	dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
	if (!dev_info->edac_dev)
		return -ENOMEM;

	dev_info->edac_dev->pvt_info = dev_info;
	dev_info->edac_dev->dev = &dev_info->dev->dev;
	dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
	dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);

	if (edac_op_state == EDAC_OPSTATE_POLL)
		dev_info->edac_dev->edac_check = amd8131_chipset.check;

	if (amd8131_chipset.init)
		amd8131_chipset.init(dev_info);

	if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
		printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
			dev_info->ctl_name);
		edac_pci_free_ctl_info(dev_info->edac_dev);
		return -ENODEV;
	}

	printk(KERN_INFO "added one device on AMD8131 "
		"vendor %x, device %x, devfn %x, name %s\n",
		PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
		dev_info->devfn, dev_info->ctl_name);

	return 0;
}
Example #23
static int tpci200_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
{
    int ret, i;
    struct tpci200_board *tpci200;
    u32 reg32;

    tpci200 = kzalloc(sizeof(struct tpci200_board), GFP_KERNEL);
    if (!tpci200)
        return -ENOMEM;

    tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
    if (!tpci200->info) {
        ret = -ENOMEM;
        goto out_err_info;
    }

    pci_dev_get(pdev);

    /* Obtain a mapping of the carrier's PCI configuration registers */
    ret = pci_request_region(pdev, TPCI200_CFG_MEM_BAR,
                             KBUILD_MODNAME " Configuration Memory");
    if (ret) {
        dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
        ret = -EBUSY;
        goto out_err_pci_request;
    }
    tpci200->info->cfg_regs = ioremap_nocache(
                                  pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
                                  pci_resource_len(pdev, TPCI200_CFG_MEM_BAR));
    if (!tpci200->info->cfg_regs) {
        dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
        ret = -EFAULT;
        goto out_err_ioremap;
    }

    /* Disable byte swapping for 16 bit IP module access. This will ensure
     * that the Industrypack big endian byte order is preserved by the
     * carrier. */
    reg32 = ioread32(tpci200->info->cfg_regs + LAS1_DESC);
    reg32 |= 1 << LAS_BIT_BIGENDIAN;
    iowrite32(reg32, tpci200->info->cfg_regs + LAS1_DESC);

    reg32 = ioread32(tpci200->info->cfg_regs + LAS2_DESC);
    reg32 |= 1 << LAS_BIT_BIGENDIAN;
    iowrite32(reg32, tpci200->info->cfg_regs + LAS2_DESC);

    /* Save struct pci_dev pointer */
    tpci200->info->pdev = pdev;
    tpci200->info->id_table = (struct pci_device_id *)id;

    /* register the device and initialize it */
    ret = tpci200_install(tpci200);
    if (ret) {
        dev_err(&pdev->dev, "error during tpci200 install\n");
        ret = -ENODEV;
        goto out_err_install;
    }

    /* Register the carrier in the industry pack bus driver */
    tpci200->info->ipack_bus = ipack_bus_register(&pdev->dev,
                               TPCI200_NB_SLOT,
                               &tpci200_bus_ops);
    if (!tpci200->info->ipack_bus) {
        dev_err(&pdev->dev,
                "error registering the carrier on ipack driver\n");
        ret = -EFAULT;
        goto out_err_bus_register;
    }

    /* save the bus number given by ipack for logging purposes */
    tpci200->number = tpci200->info->ipack_bus->bus_nr;
    dev_set_drvdata(&pdev->dev, tpci200);

    for (i = 0; i < TPCI200_NB_SLOT; i++)
        tpci200_create_device(tpci200, i);
    return 0;

out_err_bus_register:
    tpci200_uninstall(tpci200);
out_err_install:
    iounmap(tpci200->info->cfg_regs);
out_err_ioremap:
    pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
out_err_pci_request:
    pci_dev_put(pdev);
    kfree(tpci200->info);
out_err_info:
    kfree(tpci200);
    return ret;
}
Example #24
static int __devinit intel_mid_vibra_probe(struct pci_dev *pci,
			const struct pci_device_id *pci_id)
{
	struct vibra_info *info;
	int ret = 0;

	pr_debug("Probe for DID %x\n", pci->device);

	info =  devm_kzalloc(&pci->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	ret = gpio_request_one(INTEL_VIBRA_ENABLE_GPIO, GPIOF_DIR_OUT,
				 "VIBRA ENABLE");
	if (ret != 0) {
		pr_err("gpio_request(%d) fails:%d\n",
			INTEL_VIBRA_ENABLE_GPIO, ret);
		goto out;
	}

	ret = gpio_request_one(INTEL_PWM_ENABLE_GPIO, GPIOF_DIR_OUT,
				  "PWM ENABLE");

	if (ret != 0) {
		pr_err("gpio_request(%d) fails:%d\n",
			INTEL_PWM_ENABLE_GPIO, ret);
		goto do_freegpio_vibra_enable;
	}

	/* Init the device */
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("device can't be enabled\n");
		goto do_freegpio_pwm;
	}
	ret = pci_request_regions(pci, INTEL_VIBRA_DRV_NAME);

	if (ret)
		goto do_disable_device;
	info->pci = pci_dev_get(pci);

	/* vibra Shim */
	info->shim =  pci_ioremap_bar(pci, 0);
	if (!info->shim) {
		pr_err("ioremap failed for vibra driver\n");
		goto do_release_regions;
	}

	/*set default value to Max */
	info->pwm.part.pwmbu = INTEL_VIBRA_MAX_BASEUNIT;
	info->pwm.part.pwmtd = INTEL_VIBRA_MAX_TIMEDIVISOR;

	info->dev = &pci->dev;
	info->name = "intel_mid:vibrator";
	mutex_init(&info->lock);

	if (vibra_register_sysfs(info) < 0) {
		pr_err("could not register sysfs files\n");
		goto do_unmap_shim;
	}
	lnw_gpio_set_alt(INTEL_PWM_ENABLE_GPIO, LNW_ALT_2);
	vibra_pwm_configure(info, true);

	pci_set_drvdata(pci, info);
	pm_runtime_allow(&pci->dev);
	pm_runtime_put_noidle(&pci->dev);
	return ret;

do_unmap_shim:
	iounmap(info->shim);
do_release_regions:
	pci_release_regions(pci);
do_disable_device:
	pci_disable_device(pci);
do_freegpio_pwm:
	gpio_free(INTEL_PWM_ENABLE_GPIO);
do_freegpio_vibra_enable:
	gpio_free(INTEL_VIBRA_ENABLE_GPIO);
out:
	return ret;
}
Example #25
/*
* intel_sst_probe - PCI probe function
*
* @pci:	PCI device structure
* @pci_id: PCI device ID structure
*
* This function is called by OS when a device is found
* This enables the device, interrupt etc
*/
static int __devinit intel_sst_probe(struct pci_dev *pci,
                                     const struct pci_device_id *pci_id)
{
    int i, ret = 0;

    pr_debug("Probe for DID %x\n", pci->device);
    mutex_lock(&drv_ctx_lock);
    if (sst_drv_ctx) {
        pr_err("Only one sst handle is supported\n");
        mutex_unlock(&drv_ctx_lock);
        return -EBUSY;
    }

    sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
    if (!sst_drv_ctx) {
        pr_err("malloc fail\n");
        mutex_unlock(&drv_ctx_lock);
        return -ENOMEM;
    }
    mutex_unlock(&drv_ctx_lock);

    sst_drv_ctx->pci_id = pci->device;

    mutex_init(&sst_drv_ctx->stream_lock);
    mutex_init(&sst_drv_ctx->sst_lock);
    mutex_init(&sst_drv_ctx->mixer_ctrl_lock);

    sst_drv_ctx->stream_cnt = 0;
    sst_drv_ctx->encoded_cnt = 0;
    sst_drv_ctx->am_cnt = 0;
    sst_drv_ctx->pb_streams = 0;
    sst_drv_ctx->cp_streams = 0;
    sst_drv_ctx->unique_id = 0;
    sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;
    sst_drv_ctx->fw = NULL;
    sst_drv_ctx->fw_in_mem = NULL;

    INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
    INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
    INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
    INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
    init_waitqueue_head(&sst_drv_ctx->wait_queue);

    sst_drv_ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
    if (!sst_drv_ctx->mad_wq)
        goto do_free_drv_ctx;
    sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
    if (!sst_drv_ctx->post_msg_wq)
        goto free_mad_wq;
    sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wqq");
    if (!sst_drv_ctx->process_msg_wq)
        goto free_post_msg_wq;
    sst_drv_ctx->process_reply_wq = create_workqueue("sst_proces_reply_wq");
    if (!sst_drv_ctx->process_reply_wq)
        goto free_process_msg_wq;

    for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
        sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
        sst_drv_ctx->alloc_block[i].ops_block.condition = false;
    }
    spin_lock_init(&sst_drv_ctx->ipc_spin_lock);

    sst_drv_ctx->max_streams = pci_id->driver_data;
    pr_debug("Got drv data max stream %d\n",
             sst_drv_ctx->max_streams);
    for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
        struct stream_info *stream = &sst_drv_ctx->streams[i];
        INIT_LIST_HEAD(&stream->bufs);
        mutex_init(&stream->lock);
        spin_lock_init(&stream->pcm_lock);
    }
    if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
        sst_drv_ctx->mmap_mem = NULL;
        sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
        while (sst_drv_ctx->mmap_len > 0) {
            sst_drv_ctx->mmap_mem =
                kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
            if (sst_drv_ctx->mmap_mem) {
                pr_debug("Got memory %p size 0x%x\n",
                         sst_drv_ctx->mmap_mem,
                         sst_drv_ctx->mmap_len);
                break;
            }
            if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
                pr_err("mem alloc fail...abort!!\n");
                ret = -ENOMEM;
                goto free_process_reply_wq;
            }
            sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
            pr_debug("mem alloc failed...trying %d\n",
                     sst_drv_ctx->mmap_len);
        }
    }
    if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
        sst_drv_ctx->device_input_mixer = SST_STREAM_DEVICE_IHF
                                          | SST_INPUT_STREAM_PCM;
    }

    /* Init the device */
    ret = pci_enable_device(pci);
    if (ret) {
        pr_err("device can't be enabled\n");
        goto do_free_mem;
    }
    sst_drv_ctx->pci = pci_dev_get(pci);
    ret = pci_request_regions(pci, SST_DRV_NAME);
    if (ret)
        goto do_disable_device;
    /* map registers */
    /* SST Shim */
    sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
    sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
    if (!sst_drv_ctx->shim)
        goto do_release_regions;
    pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);

    /* Shared SRAM */
    sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
    if (!sst_drv_ctx->mailbox)
        goto do_unmap_shim;
    pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);

    /* IRAM */
    sst_drv_ctx->iram_base = pci_resource_start(pci, 3);
    sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
    if (!sst_drv_ctx->iram)
        goto do_unmap_sram;
    pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);

    /* DRAM */
    sst_drv_ctx->dram_base = pci_resource_start(pci, 4);
    sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
    if (!sst_drv_ctx->dram)
        goto do_unmap_iram;
    pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);

    sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
    /* Register the ISR */
    ret = request_threaded_irq(pci->irq, intel_sst_interrupt,
                               intel_sst_irq_thread, IRQF_SHARED, SST_DRV_NAME,
                               sst_drv_ctx);
    if (ret)
        goto do_unmap_dram;
    pr_debug("Registered IRQ 0x%x\n", pci->irq);

    /*Register LPE Control as misc driver*/
    ret = misc_register(&lpe_ctrl);
    if (ret) {
        pr_err("couldn't register control device\n");
        goto do_free_irq;
    }

    if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
        ret = misc_register(&lpe_dev);
        if (ret) {
            pr_err("couldn't register LPE device\n");
            goto do_free_misc;
        }
    } else if ((sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) ||
               (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)) {
        u32 csr;
        u32 csr2;
        u32 clkctl;

        /*allocate mem for fw context save during suspend*/
        sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
        if (!sst_drv_ctx->fw_cntx) {
            ret = -ENOMEM;
            goto do_free_misc;
        }
        /*setting zero as that is valid mem to restore*/
        sst_drv_ctx->fw_cntx_size = 0;

        /*set lpe start clock and ram size*/
        csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
        csr |= 0x30000;
        /*make sure clksel set to OSC for SSP0,1 (default)*/
        csr &= 0xFFFFFFF3;
        sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);

        /*set clock output enable for SSP0,1,3*/
        clkctl = sst_shim_read(sst_drv_ctx->shim, SST_CLKCTL);
        if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
            clkctl |= (0x7 << 16);
        else
            clkctl |= ((1<<16)|(1<<17));
        sst_shim_write(sst_drv_ctx->shim, SST_CLKCTL, clkctl);

        /* set SSP0 & SSP1 disable DMA Finish*/
        csr2 = sst_shim_read(sst_drv_ctx->shim, SST_CSR2);
        /* set SSP3 disable DMA finish for SSP3 */
        csr2 |= BIT(1)|BIT(2);
        sst_shim_write(sst_drv_ctx->shim, SST_CSR2, csr2);
    }

    /* GPIO_PIN 12,13,74,75 needs to be configured in
     * ALT_FUNC_2 mode for SSP3 IOs
     */
    if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
        lnw_gpio_set_alt(CLV_I2S_3_CLK_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_FS_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_TXD_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_RXD_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_VIBRA_PWM_GPIO_PIN, LNW_ALT_2);

        vibra_pwm_configure(true);
    }

    sst_drv_ctx->lpe_stalled = 0;
    pci_set_drvdata(pci, sst_drv_ctx);
    pm_runtime_allow(&pci->dev);
    pm_runtime_put_noidle(&pci->dev);
    register_sst(&pci->dev);

    sst_drv_ctx->qos = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL);
    if (!sst_drv_ctx->qos)
        goto do_free_misc;
    pm_qos_add_request(sst_drv_ctx->qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

    pr_info("%s successfully done!\n", __func__);
    return ret;

do_free_misc:
    misc_deregister(&lpe_ctrl);
do_free_irq:
    free_irq(pci->irq, sst_drv_ctx);
do_unmap_dram:
    iounmap(sst_drv_ctx->dram);
do_unmap_iram:
    iounmap(sst_drv_ctx->iram);
do_unmap_sram:
    iounmap(sst_drv_ctx->mailbox);
do_unmap_shim:
    iounmap(sst_drv_ctx->shim);
do_release_regions:
    pci_release_regions(pci);
do_disable_device:
    pci_disable_device(pci);
do_free_mem:
    kfree(sst_drv_ctx->mmap_mem);
free_process_reply_wq:
    destroy_workqueue(sst_drv_ctx->process_reply_wq);
free_process_msg_wq:
    destroy_workqueue(sst_drv_ctx->process_msg_wq);
free_post_msg_wq:
    destroy_workqueue(sst_drv_ctx->post_msg_wq);
free_mad_wq:
    destroy_workqueue(sst_drv_ctx->mad_wq);
do_free_drv_ctx:
    kfree(sst_drv_ctx);
    sst_drv_ctx = NULL;
    pr_err("Probe failed with %d\n", ret);
    return ret;
}