Example #1
void hcd_remove(
#ifdef LM_INTERFACE
		       struct lm_device *_dev
#elif  defined(PCI_INTERFACE)
		       struct pci_dev *_dev
#endif
    )
{
#ifdef LM_INTERFACE
	dwc_otg_device_t *otg_dev = lm_get_drvdata(_dev);
#elif  defined(PCI_INTERFACE)
	dwc_otg_device_t *otg_dev = pci_get_drvdata(_dev);
#endif

	dwc_otg_hcd_t *dwc_otg_hcd;
	struct usb_hcd *hcd;

	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");

	if (!otg_dev) {
		DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
		return;
	}

	dwc_otg_hcd = otg_dev->hcd;

	if (!dwc_otg_hcd) {
		DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
		return;
	}
Example #2
static int rt2860_resume(
	struct pci_dev *pci_dev)
{
	struct net_device *net_dev = pci_get_drvdata(pci_dev);
	PRTMP_ADAPTER pAd = (PRTMP_ADAPTER)NULL;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
	INT32 retval;


	// set the power state of a PCI device
	// PCI has 4 power states, D0 (normal) ~ D3 (less power)
	// in include/linux/pci.h, you can find that
	// #define PCI_D0          ((pci_power_t __force) 0)
	// #define PCI_D1          ((pci_power_t __force) 1)
	// #define PCI_D2          ((pci_power_t __force) 2)
	// #define PCI_D3hot       ((pci_power_t __force) 3)
	// #define PCI_D3cold      ((pci_power_t __force) 4)
	// #define PCI_UNKNOWN     ((pci_power_t __force) 5)
	// #define PCI_POWER_ERROR ((pci_power_t __force) -1)
	retval = pci_set_power_state(pci_dev, PCI_D0);

	// restore the saved state of a PCI device
	pci_restore_state(pci_dev);

	// initialize device before it's used by a driver
	if (pci_enable_device(pci_dev))
	{
		printk("pci enable fail!\n");
		return 0;
	}
#endif

	DBGPRINT(RT_DEBUG_TRACE, ("===> rt2860_resume()\n"));

	if (net_dev == NULL)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("net_dev == NULL!\n"));
	}
	else
		GET_PAD_FROM_NET_DEV(pAd, net_dev);

	if (pAd != NULL)
	{
		/* we can not use IFF_UP because ra0 down but ra1 up */
		/* and 1 suspend/resume function for 1 module, not for each interface */
		/* so Linux will call suspend/resume function once */
		if (VIRTUAL_IF_NUM(pAd) > 0)
		{
			// mark device as attached from system and restart if needed
			netif_device_attach(net_dev);

			if (rt28xx_open((PNET_DEV)net_dev) != 0)
			{
				// open fail
				DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2860_resume()\n"));
				return 0;
			}

			// increase MODULE use count
			RT_MOD_INC_USE_COUNT();

			RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);
			RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF);

			netif_start_queue(net_dev);
			netif_carrier_on(net_dev);
			netif_wake_queue(net_dev);
		}
	}

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2860_resume()\n"));
	return 0;
}
Example #3
void oxygen_pci_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}
Example #4
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	nouveau_fbcon_output_poll_changed(dev);
}
Example #5
static void mga_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
Example #6
static int
radeon_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_resume_kms(dev);
}
Example #7
static int rt_pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *net_dev = pci_get_drvdata(pci_dev);
	VOID *pAd = NULL;
	INT32 retval = 0;


	DBGPRINT(RT_DEBUG_TRACE, ("===>%s()\n", __FUNCTION__));

	if (net_dev == NULL)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("net_dev == NULL!\n"));
	}
	else
	{
		ULONG IfNum;

		GET_PAD_FROM_NET_DEV(pAd, net_dev);

		/* we can not use IFF_UP because ra0 down but ra1 up */
		/* and 1 suspend/resume function for 1 module, not for each interface */
		/* so Linux will call suspend/resume function once */
		RTMP_DRIVER_VIRTUAL_INF_NUM_GET(pAd, &IfNum);
		if (IfNum > 0)
		{
			/* avoid users do suspend after interface is down */

			/* stop interface */
			netif_carrier_off(net_dev);
			netif_stop_queue(net_dev);

			/* mark device as removed from system and therefore no longer available */
			netif_device_detach(net_dev);

			/* mark halt flag */
/*			RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS); */
/*			RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF); */
			RTMP_DRIVER_PCI_SUSPEND(pAd);

			/* take down the device */
			rt28xx_close((PNET_DEV)net_dev);

			RT_MOD_DEC_USE_COUNT();
		}
	}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
	/*
		reference to http://vovo2000.com/type-lab/linux/kernel-api/linux-kernel-api.html
		enable device to generate PME# when suspended
		pci_choose_state(): Choose the power state of a PCI device to be suspended
	*/
	retval = pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state), 1);
	/* save the PCI configuration space of a device before suspending */
	pci_save_state(pci_dev);
	/* disable PCI device after use */
	pci_disable_device(pci_dev);

	retval = pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
#endif

	DBGPRINT(RT_DEBUG_TRACE, ("<===%s()\n", __FUNCTION__));
	return retval;
}
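
The two RT28xx callbacks above (together with rt2860_resume() in Example #2) follow the legacy .suspend/.resume members of struct pci_driver. A minimal sketch of that pattern, with the driver-specific work reduced to hypothetical my_stop_hw()/my_start_hw() helpers (these helpers and the function names are assumptions, not taken from the driver):

#include <linux/pci.h>
#include <linux/netdevice.h>

static void my_stop_hw(struct net_device *net_dev);	/* hypothetical quiesce helper */
static void my_start_hw(struct net_device *net_dev);	/* hypothetical restart helper */

static int my_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *net_dev = pci_get_drvdata(pdev);

	my_stop_hw(net_dev);
	pci_save_state(pdev);			/* save config space before cutting power */
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int my_pci_resume(struct pci_dev *pdev)
{
	struct net_device *net_dev = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);		/* must succeed before touching the hardware */
	if (err)
		return err;
	pci_set_master(pdev);

	my_start_hw(net_dev);
	return 0;
}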
Example #8
/*
 * Each ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [63:60]  - reserved
 * [71:64]  - RINGID
 * [79:72]  - VMID
 * [127:80] - reserved
 */
static void vector(struct pci_dev *dev, u32 id, u32 data, u8 ring_id,
					u8 *irq_thd, u8 *dce6_irqs_acked)
{
	struct dev_drv_data *dd;

	dd = pci_get_drvdata(dev);

	switch (id) {
	case VECTOR_ID_HPD:
		if (!*dce6_irqs_acked) {
			dce6_irqs_ack(dd->dce);
			*dce6_irqs_acked = 1;
		}
		dce6_hpd_irq(dd->dce, data);
		*irq_thd = IRQ_THD_ENA;
		break;
	case VECTOR_ID_D0:
	case VECTOR_ID_D1:
	case VECTOR_ID_D2:
	case VECTOR_ID_D3:
	case VECTOR_ID_D4:
	case VECTOR_ID_D5:
		if (!*dce6_irqs_acked) {
			dce6_irqs_ack(dd->dce);
			*dce6_irqs_acked = 1;
		}
		if (data == Dx_VBLANK) {/* only page flipping in vblank */
			struct timespec tp;
			getrawmonotonic(&tp);

			dce6_pf_irq(dd->dce, id - 1, tp);
		}
		break;
	case VECTOR_ID_EOP:
		switch(ring_id) {
		case 0:
			atomic_set(&dd->gpu_3d.fence.bottom,
				le32_to_cpup(dd->gpu_3d.fence.cpu_addr));

			wake_up(&dd->gpu_3d.fence.wait_queue);
			break;
		case 1:
			atomic_set(&dd->gpu_c0.fence.bottom,
				le32_to_cpup(dd->gpu_c0.fence.cpu_addr));

			wake_up(&dd->gpu_c0.fence.wait_queue);
			break;
		case 2:
			atomic_set(&dd->gpu_c1.fence.bottom,
				le32_to_cpup(dd->gpu_c1.fence.cpu_addr));

			wake_up(&dd->gpu_c1.fence.wait_queue);
			break;
		}
		break;
	case VECTOR_ID_DMA_0:
		atomic_set(&dd->dmas[0].fence.bottom,
				le32_to_cpup(dd->dmas[0].fence.cpu_addr));

		wake_up(&dd->dmas[0].fence.wait_queue);
		break;
	case VECTOR_ID_DMA_1:
		atomic_set(&dd->dmas[1].fence.bottom,
				le32_to_cpup(dd->dmas[1].fence.cpu_addr));
		wake_up(&dd->dmas[1].fence.wait_queue);
		break;
	}
}
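
Given the entry layout described in the comment above, the caller of vector() presumably unpacks each 128-bit entry from consecutive little-endian dwords before dispatching. A sketch of such a decode (the helper name and the __le32 entry pointer are assumptions, not taken from the driver):

/* kernel context: __le32/u32 and le32_to_cpu() come from <linux/types.h> and <asm/byteorder.h> */
static void decode_ih_entry(const __le32 *e, u32 *src_id, u32 *src_data,
			    u8 *ring_id, u8 *vmid)
{
	u32 dw0 = le32_to_cpu(e[0]);
	u32 dw1 = le32_to_cpu(e[1]);
	u32 dw2 = le32_to_cpu(e[2]);

	*src_id   = dw0 & 0xff;			/* bits [7:0]   interrupt source id   */
	*src_data = dw1 & 0x0fffffff;		/* bits [59:32] interrupt source data */
	*ring_id  = dw2 & 0xff;			/* bits [71:64] RINGID                */
	*vmid     = (dw2 >> 8) & 0xff;		/* bits [79:72] VMID                  */
}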
Example #9
static int init_chipset_siimage(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	void __iomem *ioaddr = host->host_priv;
	unsigned long base, scsc_addr;
	u8 rev = dev->revision, tmp;

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);

	if (ioaddr)
		pci_set_master(dev);

	base = (unsigned long)ioaddr;

	if (ioaddr && pdev_is_sata(dev)) {
		u32 tmp32, irq_mask;

		/* if the IDE0/IDE1 interrupt bits (22/23) are latched, clear them */
		irq_mask = (1 << 22) | (1 << 23);
		tmp32 = readl(ioaddr + 0x48);
		if (tmp32 & irq_mask) {
			tmp32 &= ~irq_mask;
			writel(tmp32, ioaddr + 0x48);
			readl(ioaddr + 0x48);	/* flush the write */
		}
		writel(0, ioaddr + 0x148);
		writel(0, ioaddr + 0x1C8);
	}

	sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80);
	sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84);

	scsc_addr = base ? (base + 0x4A) : 0x8A;
	tmp = sil_ioread8(dev, scsc_addr);

	switch (tmp & 0x30) {
	case 0x00:
		/* 100 MHz base clock: try to switch to 133 MHz */
		sil_iowrite8(dev, tmp | 0x10, scsc_addr);
		break;
	case 0x30:
		/* clocking disabled: re-enable 133 MHz clocking and fall through */
		sil_iowrite8(dev, tmp & ~0x20, scsc_addr);
		/* fall through */
	case 0x10:
		/* already on 133 MHz clocking */
		break;
	case 0x20:
		/* 2X PCI clocking, leave as is */
		break;
	}

	tmp = sil_ioread8(dev, scsc_addr);

	sil_iowrite8 (dev,       0x72, base + 0xA1);
	sil_iowrite16(dev,     0x328A, base + 0xA2);
	sil_iowrite32(dev, 0x62DD62DD, base + 0xA4);
	sil_iowrite32(dev, 0x43924392, base + 0xA8);
	sil_iowrite32(dev, 0x40094009, base + 0xAC);
	sil_iowrite8 (dev,       0x72, base ? (base + 0xE1) : 0xB1);
	sil_iowrite16(dev,     0x328A, base ? (base + 0xE2) : 0xB2);
	sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4);
	sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8);
	sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC);

	if (base && pdev_is_sata(dev)) {
		writel(0xFFFF0000, ioaddr + 0x108);
		writel(0xFFFF0000, ioaddr + 0x188);
		writel(0x00680000, ioaddr + 0x148);
		writel(0x00680000, ioaddr + 0x1C8);
	}

	/* report the base clock for PATA-only chips */
	if (!pdev_is_sata(dev)) {
		static const char *clk_str[] =
			{ "== 100", "== 133", "== 2X PCI", "DISABLED!" };

		tmp >>= 4;
		printk(KERN_INFO DRV_NAME " %s: BASE CLOCK %s\n",
			pci_name(dev), clk_str[tmp & 3]);
	}

	return 0;
}
Example #10
void xen_pcibk_do_op(struct work_struct *data)
{
	struct xen_pcibk_device *pdev =
		container_of(data, struct xen_pcibk_device, op_work);
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data = NULL;
	struct xen_pci_op *op = &pdev->op;
	int test_intx = 0;
#ifdef CONFIG_PCI_MSI
	unsigned int nr = 0;
#endif

	*op = pdev->sh_info->op;
	barrier();
	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			test_intx = dev_data->enable_intx;
		switch (op->cmd) {
		case XEN_PCI_OP_conf_read:
			op->err = xen_pcibk_config_read(dev,
				  op->offset, op->size, &op->value);
			break;
		case XEN_PCI_OP_conf_write:
			op->err = xen_pcibk_config_write(dev,
				  op->offset, op->size,	op->value);
			break;
#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = xen_pcibk_enable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msi:
			op->err = xen_pcibk_disable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_enable_msix:
			nr = op->value;
			op->err = xen_pcibk_enable_msix(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msix:
			op->err = xen_pcibk_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	if (!op->err && dev && dev_data) {
		/* Transition detected */
		if ((dev_data->enable_intx != test_intx))
			xen_pcibk_control_isr(dev, 0 /* no reset */);
	}
	pdev->sh_info->op.err = op->err;
	pdev->sh_info->op.value = op->value;
#ifdef CONFIG_PCI_MSI
	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
		unsigned int i;

		for (i = 0; i < nr; i++)
			pdev->sh_info->op.msix_entries[i].vector =
				op->msix_entries[i].vector;
	}
#endif
	/* Tell the driver domain that we're done. */
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
	*/
	xen_pcibk_test_and_schedule_op(pdev);
}
Example #11
void pch_ch_event_write(struct pci_dev *pdev, u32 val)
{
	struct pch_dev *chip = pci_get_drvdata(pdev);

	iowrite32(val, (&chip->regs->ch_event));
}
Example #12
/* Ensure a device has the fake IRQ handler "turned on/off" and is
 * ready to be exported. This MUST be run after xen_pcibk_reset_device
 * which does the actual PCI device enable/disable.
 */
static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
{
	struct xen_pcibk_dev_data *dev_data;
	int rc;
	int enable = 0;

	dev_data = pci_get_drvdata(dev);
	if (!dev_data)
		return;

	/* We don't deal with bridges */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return;

	if (reset) {
		dev_data->enable_intx = 0;
		dev_data->ack_intr = 0;
	}
	enable = dev_data->enable_intx;

	/* Asked to disable, but ISR isn't running */
	if (!enable && !dev_data->isr_on)
		return;

	/* Squirrel away the IRQs in the dev_data. We need this
	 * b/c when device transitions to MSI, the dev->irq is
	 * overwritten with the MSI vector.
	 */
	if (enable)
		dev_data->irq = dev->irq;

	/*
	 * SR-IOV devices all use MSI-X and have no legacy
	 * interrupts, so inhibit creating a fake IRQ handler for them.
	 */
	if (dev_data->irq == 0)
		goto out;

	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		dev_data->isr_on ? "enable" : "disable",
		enable ? "enable" : "disable");

	if (enable) {
		/*
		 * The MSI or MSI-X should not have an IRQ handler. Otherwise
		 * if the guest terminates we BUG_ON in free_msi_irqs.
		 */
		if (dev->msi_enabled || dev->msix_enabled)
			goto out;

		rc = request_irq(dev_data->irq,
				xen_pcibk_guest_interrupt, IRQF_SHARED,
				dev_data->irq_name, dev);
		if (rc) {
			dev_err(&dev->dev, "%s: failed to install fake IRQ " \
				"handler for IRQ %d! (rc:%d)\n",
				dev_data->irq_name, dev_data->irq, rc);
			goto out;
		}
	} else {
		free_irq(dev_data->irq, dev);
		dev_data->irq = 0;
	}
	dev_data->isr_on = enable;
	dev_data->ack_intr = enable;
out:
	dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
		dev_data->irq_name,
		dev_data->irq,
		pci_is_enabled(dev) ? "on" : "off",
		dev->msi_enabled ? "MSI" : "",
		dev->msix_enabled ? "MSI/X" : "",
		enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
			(dev_data->isr_on ? "failed to disable" : "disabled"));
}
Example #13
static
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int i, result;
	struct msix_entry *entries;
	u16 cmd;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
		       pci_name(dev));

	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	if (dev->msix_enabled)
		return -EALREADY;

	/*
	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
	 * to access the BARs where the MSI-X entries reside.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
		return -ENXIO;

	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix(dev, entries, op->value);

	if (result == 0) {
		for (i = 0; i < op->value; i++) {
			op->msix_entries[i].entry = entries[i].entry;
			if (entries[i].vector)
				op->msix_entries[i].vector =
					xen_pirq_from_irq(entries[i].vector);
			if (unlikely(verbose_request))
				printk(KERN_DEBUG DRV_NAME ": %s: " \
					"MSI-X[%d]: %d\n",
					pci_name(dev), i,
					op->msix_entries[i].vector);
		}
	} else
		pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
				    pci_name(dev), pdev->xdev->otherend_id,
				    result);
	kfree(entries);

	op->value = result;
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return result > 0 ? 0 : result;
}
Example #14
static ssize_t show_phy_flash_cfg(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
    struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
    return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
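
A show callback like the one above only takes effect once it is bound to a device attribute and registered on the PCI device. A sketch of the usual wiring (the 0444 mode, the helper names, and the omission of a store callback are assumptions; the real sfc driver may register this attribute differently):

static DEVICE_ATTR(phy_flash_cfg, 0444, show_phy_flash_cfg, NULL);

/* called from the probe path, after pci_set_drvdata(pci_dev, efx) */
static int register_phy_flash_cfg_attr(struct pci_dev *pci_dev)
{
	return device_create_file(&pci_dev->dev, &dev_attr_phy_flash_cfg);
}

/* called from the remove path, before the drvdata is torn down */
static void unregister_phy_flash_cfg_attr(struct pci_dev *pci_dev)
{
	device_remove_file(&pci_dev->dev, &dev_attr_phy_flash_cfg);
}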
Example #15
static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream,
			       struct atiixp_dma *dma, int pcm_type)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	static unsigned int rates[] = { 8000,  9600, 12000, 16000 };
	static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
		.count = ARRAY_SIZE(rates),
		.list = rates,
		.mask = 0,
	};

	if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
		return -EINVAL;

	if (dma->opened)
		return -EBUSY;
	dma->substream = substream;
	runtime->hw = snd_atiixp_pcm_hw;
	dma->ac97_pcm_type = pcm_type;
	if ((err = snd_pcm_hw_constraint_list(runtime, 0,
					      SNDRV_PCM_HW_PARAM_RATE,
					      &hw_constraints_rates)) < 0)
		return err;
	if ((err = snd_pcm_hw_constraint_integer(runtime,
						 SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
		return err;
	runtime->private_data = dma;

	/* enable DMA */
	spin_lock_irq(&chip->reg_lock);
	dma->ops->enable_dma(chip, 1);
	spin_unlock_irq(&chip->reg_lock);
	dma->opened = 1;

	return 0;
}

static int snd_atiixp_pcm_close(struct snd_pcm_substream *substream,
				struct atiixp_dma *dma)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	/* disable DMA */
	if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
		return -EINVAL;
	spin_lock_irq(&chip->reg_lock);
	dma->ops->enable_dma(chip, 0);
	spin_unlock_irq(&chip->reg_lock);
	dma->substream = NULL;
	dma->opened = 0;
	return 0;
}

/*
 * PCM open/close callbacks
 */
static int snd_atiixp_playback_open(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	int err;

	mutex_lock(&chip->open_mutex);
	err = snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_PLAYBACK], 0);
	mutex_unlock(&chip->open_mutex);
	if (err < 0)
		return err;
	return 0;
}

static int snd_atiixp_playback_close(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	int err;
	mutex_lock(&chip->open_mutex);
	err = snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_PLAYBACK]);
	mutex_unlock(&chip->open_mutex);
	return err;
}

static int snd_atiixp_capture_open(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	return snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_CAPTURE], 1);
}

static int snd_atiixp_capture_close(struct snd_pcm_substream *substream)
{
	struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
	return snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_CAPTURE]);
}


/* playback PCM ops */
static struct snd_pcm_ops snd_atiixp_playback_ops = {
	.open =		snd_atiixp_playback_open,
	.close =	snd_atiixp_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_atiixp_pcm_hw_params,
	.hw_free =	snd_atiixp_pcm_hw_free,
	.prepare =	snd_atiixp_playback_prepare,
	.trigger =	snd_atiixp_pcm_trigger,
	.pointer =	snd_atiixp_pcm_pointer,
};

/* capture PCM ops */
static struct snd_pcm_ops snd_atiixp_capture_ops = {
	.open =		snd_atiixp_capture_open,
	.close =	snd_atiixp_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_atiixp_pcm_hw_params,
	.hw_free =	snd_atiixp_pcm_hw_free,
	.prepare =	snd_atiixp_capture_prepare,
	.trigger =	snd_atiixp_pcm_trigger,
	.pointer =	snd_atiixp_pcm_pointer,
};

static struct atiixp_dma_ops snd_atiixp_playback_dma_ops = {
	.type = ATI_DMA_PLAYBACK,
	.llp_offset = ATI_REG_MODEM_OUT_DMA1_LINKPTR,
	.dt_cur = ATI_REG_MODEM_OUT_DMA1_DT_CUR,
	.enable_dma = atiixp_out_enable_dma,
	.enable_transfer = atiixp_out_enable_transfer,
	.flush_dma = atiixp_out_flush_dma,
};
	
static struct atiixp_dma_ops snd_atiixp_capture_dma_ops = {
	.type = ATI_DMA_CAPTURE,
	.llp_offset = ATI_REG_MODEM_IN_DMA_LINKPTR,
	.dt_cur = ATI_REG_MODEM_IN_DMA_DT_CUR,
	.enable_dma = atiixp_in_enable_dma,
	.enable_transfer = atiixp_in_enable_transfer,
	.flush_dma = atiixp_in_flush_dma,
};

static int __devinit snd_atiixp_pcm_new(struct atiixp_modem *chip)
{
	struct snd_pcm *pcm;
	int err;

	/* set up DMA callbacks */
	chip->dmas[ATI_DMA_PLAYBACK].ops = &snd_atiixp_playback_dma_ops;
	chip->dmas[ATI_DMA_CAPTURE].ops = &snd_atiixp_capture_dma_ops;

	/* create the PCM device */
	err = snd_pcm_new(chip->card, "ATI IXP MC97", ATI_PCMDEV_ANALOG, 1, 1, &pcm);
	if (err < 0)
		return err;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_atiixp_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_atiixp_capture_ops);
	pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	pcm->private_data = chip;
	strcpy(pcm->name, "ATI IXP MC97");
	chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 128*1024);

	return 0;
}



/*
 * interrupt handler
 */
static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id)
{
	struct atiixp_modem *chip = dev_id;
	unsigned int status;

	status = atiixp_read(chip, ISR);

	if (! status)
		return IRQ_NONE;

	/* DMA position updates and xrun handling */
	if (status & ATI_REG_ISR_MODEM_OUT1_XRUN)
		snd_atiixp_xrun_dma(chip,  &chip->dmas[ATI_DMA_PLAYBACK]);
	else if (status & ATI_REG_ISR_MODEM_OUT1_STATUS)
		snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]);
	if (status & ATI_REG_ISR_MODEM_IN_XRUN)
		snd_atiixp_xrun_dma(chip,  &chip->dmas[ATI_DMA_CAPTURE]);
	else if (status & ATI_REG_ISR_MODEM_IN_STATUS)
		snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]);

	/* codec not-ready interrupts */
	if (status & CODEC_CHECK_BITS) {
		unsigned int detected;
		detected = status & CODEC_CHECK_BITS;
		spin_lock(&chip->reg_lock);
		chip->codec_not_ready_bits |= detected;
		atiixp_update(chip, IER, detected, 0); /* mask the detected interrupts */
		spin_unlock(&chip->reg_lock);
	}

	/* ack the interrupts */
	atiixp_write(chip, ISR, status);

	return IRQ_HANDLED;
}


/*
 * AC'97 mixer setup
 */

static int __devinit snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
{
	struct snd_ac97_bus *pbus;
	struct snd_ac97_template ac97;
	int i, err;
	int codec_count;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_atiixp_ac97_write,
		.read = snd_atiixp_ac97_read,
	};
	static unsigned int codec_skip[NUM_ATI_CODECS] = {
		ATI_REG_ISR_CODEC0_NOT_READY,
		ATI_REG_ISR_CODEC1_NOT_READY,
		ATI_REG_ISR_CODEC2_NOT_READY,
	};

	if (snd_atiixp_codec_detect(chip) < 0)
		return -ENXIO;

	if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0)
		return err;
	pbus->clock = clock;
	chip->ac97_bus = pbus;

	codec_count = 0;
	for (i = 0; i < NUM_ATI_CODECS; i++) {
		if (chip->codec_not_ready_bits & codec_skip[i])
			continue;
		memset(&ac97, 0, sizeof(ac97));
		ac97.private_data = chip;
		ac97.pci = chip->pci;
		ac97.num = i;
		ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE;
		if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) {
			chip->ac97[i] = NULL; /* not available */
			snd_printdd("atiixp-modem: codec %d not available for modem\n", i);
			continue;
		}
		codec_count++;
	}

	if (! codec_count) {
		snd_printk(KERN_ERR "atiixp-modem: no codec available\n");
		return -ENODEV;
	}


	return 0;
}


#ifdef CONFIG_PM
/*
 * power management
 */
static int snd_atiixp_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct atiixp_modem *chip = card->private_data;
	int i;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	for (i = 0; i < NUM_ATI_PCMDEVS; i++)
		snd_pcm_suspend_all(chip->pcmdevs[i]);
	for (i = 0; i < NUM_ATI_CODECS; i++)
		snd_ac97_suspend(chip->ac97[i]);
	snd_atiixp_aclink_down(chip);
	snd_atiixp_chip_stop(chip);

	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));
	return 0;
}

static int snd_atiixp_resume(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct atiixp_modem *chip = card->private_data;
	int i;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	if (pci_enable_device(pci) < 0) {
		printk(KERN_ERR "atiixp-modem: pci_enable_device failed, "
		       "disabling device\n");
		snd_card_disconnect(card);
		return -EIO;
	}
	pci_set_master(pci);

	snd_atiixp_aclink_reset(chip);
	snd_atiixp_chip_start(chip);

	for (i = 0; i < NUM_ATI_CODECS; i++)
		snd_ac97_resume(chip->ac97[i]);

	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif /* CONFIG_PM */


#ifdef CONFIG_PROC_FS
/*
 * proc interface for register dump
 */

static void snd_atiixp_proc_read(struct snd_info_entry *entry,
				 struct snd_info_buffer *buffer)
{
	struct atiixp_modem *chip = entry->private_data;
	int i;

	for (i = 0; i < 256; i += 4)
		snd_iprintf(buffer, "%02x: %08x\n", i, readl(chip->remap_addr + i));
}

static void __devinit snd_atiixp_proc_init(struct atiixp_modem *chip)
{
	struct snd_info_entry *entry;

	if (! snd_card_proc_new(chip->card, "atiixp-modem", &entry))
		snd_info_set_text_ops(entry, chip, snd_atiixp_proc_read);
}
#else
#define snd_atiixp_proc_init(chip)
#endif


/*
 * destructor
 */

static int snd_atiixp_free(struct atiixp_modem *chip)
{
	if (chip->irq < 0)
		goto __hw_end;
	snd_atiixp_chip_stop(chip);

      __hw_end:
	if (chip->irq >= 0)
		free_irq(chip->irq, chip);
	if (chip->remap_addr)
		iounmap(chip->remap_addr);
	pci_release_regions(chip->pci);
	pci_disable_device(chip->pci);
	kfree(chip);
	return 0;
}

static int snd_atiixp_dev_free(struct snd_device *device)
{
	struct atiixp_modem *chip = device->device_data;
	return snd_atiixp_free(chip);
}

/*
 * chip constructor
 */
static int __devinit snd_atiixp_create(struct snd_card *card,
				       struct pci_dev *pci,
				       struct atiixp_modem **r_chip)
{
	static struct snd_device_ops ops = {
		.dev_free =	snd_atiixp_dev_free,
	};
	struct atiixp_modem *chip;
	int err;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	spin_lock_init(&chip->reg_lock);
	mutex_init(&chip->open_mutex);
	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;
	if ((err = pci_request_regions(pci, "ATI IXP MC97")) < 0) {
		kfree(chip);
		pci_disable_device(pci);
		return err;
	}
	chip->addr = pci_resource_start(pci, 0);
	chip->remap_addr = pci_ioremap_bar(pci, 0);
	if (chip->remap_addr == NULL) {
		snd_printk(KERN_ERR "AC'97 space ioremap problem\n");
		snd_atiixp_free(chip);
		return -EIO;
	}

	if (request_irq(pci->irq, snd_atiixp_interrupt, IRQF_SHARED,
			KBUILD_MODNAME, chip)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_atiixp_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;
	pci_set_master(pci);
	synchronize_irq(chip->irq);

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_atiixp_free(chip);
		return err;
	}

	snd_card_set_dev(card, &pci->dev);

	*r_chip = chip;
	return 0;
}


static int __devinit snd_atiixp_probe(struct pci_dev *pci,
				      const struct pci_device_id *pci_id)
{
	struct snd_card *card;
	struct atiixp_modem *chip;
	int err;

	err = snd_card_create(index, id, THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	strcpy(card->driver, "ATIIXP-MODEM");
	strcpy(card->shortname, "ATI IXP Modem");
	if ((err = snd_atiixp_create(card, pci, &chip)) < 0)
		goto __error;
	card->private_data = chip;

	if ((err = snd_atiixp_aclink_reset(chip)) < 0)
		goto __error;

	if ((err = snd_atiixp_mixer_new(chip, ac97_clock)) < 0)
		goto __error;

	if ((err = snd_atiixp_pcm_new(chip)) < 0)
		goto __error;
	
	snd_atiixp_proc_init(chip);

	snd_atiixp_chip_start(chip);

	sprintf(card->longname, "%s rev %x at 0x%lx, irq %i",
		card->shortname, pci->revision, chip->addr, chip->irq);

	if ((err = snd_card_register(card)) < 0)
		goto __error;

	pci_set_drvdata(pci, card);
	return 0;

 __error:
	snd_card_free(card);
	return err;
}

static void __devexit snd_atiixp_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}
Example #16
static void fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events to the local port. ISR and
	 * other threads that can queue work items will also stop
	 * creating work items on the fnic workqueue
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->fip_timer);
		skb_queue_purge(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
		fnic_fcoe_evlist_free(fnic);
	}

	/*
	 * Log off the fabric. This stops all remote ports, dns port,
	 * logs off the fabric. This flushes all rport, disc, lport work
	 * before returning
	 */
	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fcoe_ctlr_destroy(&fnic->ctlr);
	fc_lport_destroy(lp);
	fnic_stats_debugfs_remove(fnic);

	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	scsi_host_put(lp->host);
}
Example #17
static int
radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_suspend_kms(dev, state);
}
Example #18
static int lgdt330x_pll_set(struct dvb_frontend* fe,
			    struct dvb_frontend_parameters* params)
{
	/* FIXME make this routine use the tuner-simple code.
	 * It could probably be shared with a number of ATSC
	 * frontends. Many share the same tuner with analog TV. */

	struct cx8802_dev *dev= fe->dvb->priv;
	struct cx88_core *core = dev->core;
	u8 buf[4];
	struct i2c_msg msg =
		{ .addr = dev->core->pll_addr, .flags = 0, .buf = buf, .len = 4 };
	int err;

	/* Put the analog decoder in standby to keep it quiet */
	cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);

	dvb_pll_configure(core->pll_desc, buf, params->frequency, 0);
	dprintk(1, "%s: tuner at 0x%02x bytes: 0x%02x 0x%02x 0x%02x 0x%02x\n",
			__FUNCTION__, msg.addr, buf[0],buf[1],buf[2],buf[3]);
	if ((err = i2c_transfer(&core->i2c_adap, &msg, 1)) != 1) {
		printk(KERN_WARNING "cx88-dvb: %s error "
			   "(addr %02x <- %02x, err = %i)\n",
			   __FUNCTION__, buf[0], buf[1], err);
		if (err < 0)
			return err;
		else
			return -EREMOTEIO;
	}
	if (core->tuner_type == TUNER_LG_TDVS_H062F) {
		/* Set the Auxiliary Byte. */
		buf[2] &= ~0x20;
		buf[2] |= 0x18;
		buf[3] = 0x50;
		i2c_transfer(&core->i2c_adap, &msg, 1);
	}
	return 0;
}

static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
{
	struct cx8802_dev *dev= fe->dvb->priv;
	struct cx88_core *core = dev->core;

	dprintk(1, "%s: index = %d\n", __FUNCTION__, index);
	if (index == 0)
		cx_clear(MO_GP0_IO, 8);
	else
		cx_set(MO_GP0_IO, 8);
	return 0;
}

static int lgdt330x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
{
	struct cx8802_dev *dev= fe->dvb->priv;
	if (is_punctured)
		dev->ts_gen_cntrl |= 0x04;
	else
		dev->ts_gen_cntrl &= ~0x04;
	return 0;
}

static struct lgdt330x_config fusionhdtv_3_gold = {
	.demod_address    = 0x0e,
	.demod_chip       = LGDT3302,
	.serial_mpeg      = 0x04, /* TPSERIAL for 3302 in TOP_CONTROL */
	.pll_set          = lgdt330x_pll_set,
	.set_ts_params    = lgdt330x_set_ts_param,
};

static struct lgdt330x_config fusionhdtv_5_gold = {
	.demod_address    = 0x0e,
	.demod_chip       = LGDT3303,
	.serial_mpeg      = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
	.pll_set          = lgdt330x_pll_set,
	.set_ts_params    = lgdt330x_set_ts_param,
};
#endif

static int dvb_register(struct cx8802_dev *dev)
{
	/* init struct videobuf_dvb */
	dev->dvb.name = dev->core->name;
	dev->ts_gen_cntrl = 0x0c;

	/* init frontend */
	switch (dev->core->board) {
#ifdef HAVE_CX22702
	case CX88_BOARD_HAUPPAUGE_DVB_T1:
		dev->dvb.frontend = cx22702_attach(&hauppauge_novat_config,
						   &dev->core->i2c_adap);
		break;
	case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
	case CX88_BOARD_CONEXANT_DVB_T1:
		dev->dvb.frontend = cx22702_attach(&connexant_refboard_config,
						   &dev->core->i2c_adap);
		break;
#endif
#ifdef HAVE_MT352
	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
		dev->core->pll_addr = 0x61;
		dev->core->pll_desc = &dvb_pll_lg_z201;
		dev->dvb.frontend = mt352_attach(&dvico_fusionhdtv,
						 &dev->core->i2c_adap);
		break;
	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
		dev->core->pll_addr = 0x60;
		dev->core->pll_desc = &dvb_pll_thomson_dtt7579;
		dev->dvb.frontend = mt352_attach(&dvico_fusionhdtv,
						 &dev->core->i2c_adap);
		break;
	case CX88_BOARD_KWORLD_DVB_T:
	case CX88_BOARD_DNTV_LIVE_DVB_T:
	case CX88_BOARD_ADSTECH_DVB_T_PCI:
		dev->core->pll_addr = 0x61;
		dev->core->pll_desc = &dvb_pll_unknown_1;
		dev->dvb.frontend = mt352_attach(&dntv_live_dvbt_config,
						 &dev->core->i2c_adap);
		break;
#endif
#ifdef HAVE_OR51132
	case CX88_BOARD_PCHDTV_HD3000:
		dev->dvb.frontend = or51132_attach(&pchdtv_hd3000,
						 &dev->core->i2c_adap);
		break;
#endif
#ifdef HAVE_LGDT330X
	case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
		dev->ts_gen_cntrl = 0x08;
		{
		/* Do a hardware reset of chip before using it. */
		struct cx88_core *core = dev->core;

		cx_clear(MO_GP0_IO, 1);
		mdelay(100);
		cx_set(MO_GP0_IO, 1);
		mdelay(200);

		/* Select RF connector callback */
		fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
		dev->core->pll_addr = 0x61;
		dev->core->pll_desc = &dvb_pll_microtune_4042;
		dev->dvb.frontend = lgdt330x_attach(&fusionhdtv_3_gold,
						    &dev->core->i2c_adap);
		}
		break;
	case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T:
		dev->ts_gen_cntrl = 0x08;
		{
		/* Do a hardware reset of chip before using it. */
		struct cx88_core *core = dev->core;

		cx_clear(MO_GP0_IO, 1);
		mdelay(100);
		cx_set(MO_GP0_IO, 9);
		mdelay(200);
		dev->core->pll_addr = 0x61;
		dev->core->pll_desc = &dvb_pll_thomson_dtt7611;
		dev->dvb.frontend = lgdt330x_attach(&fusionhdtv_3_gold,
						    &dev->core->i2c_adap);
		}
		break;
	case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
		dev->ts_gen_cntrl = 0x08;
		{
		/* Do a hardware reset of chip before using it. */
		struct cx88_core *core = dev->core;

		cx_clear(MO_GP0_IO, 1);
		mdelay(100);
		cx_set(MO_GP0_IO, 1);
		mdelay(200);
		dev->core->pll_addr = 0x61;
		dev->core->pll_desc = &dvb_pll_tdvs_tua6034;
		dev->dvb.frontend = lgdt330x_attach(&fusionhdtv_5_gold,
						    &dev->core->i2c_adap);
		}
		break;
#endif
	default:
		printk("%s: The frontend of your DVB/ATSC card isn't supported yet\n",
		       dev->core->name);
		break;
	}
	if (NULL == dev->dvb.frontend) {
		printk("%s: frontend initialization failed\n",dev->core->name);
		return -1;
	}

	if (dev->core->pll_desc) {
		dev->dvb.frontend->ops->info.frequency_min = dev->core->pll_desc->min;
		dev->dvb.frontend->ops->info.frequency_max = dev->core->pll_desc->max;
	}

	/* Put the analog decoder in standby to keep it quiet */
	cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);

	/* register everything */
	return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev);
}

/* ----------------------------------------------------------- */

static int __devinit dvb_probe(struct pci_dev *pci_dev,
			       const struct pci_device_id *pci_id)
{
	struct cx8802_dev *dev;
	struct cx88_core  *core;
	int err;

	/* general setup */
	core = cx88_core_get(pci_dev);
	if (NULL == core)
		return -EINVAL;

	err = -ENODEV;
	if (!cx88_boards[core->board].dvb)
		goto fail_core;

	err = -ENOMEM;
	dev = kmalloc(sizeof(*dev),GFP_KERNEL);
	if (NULL == dev)
		goto fail_core;
	memset(dev,0,sizeof(*dev));
	dev->pci = pci_dev;
	dev->core = core;

	err = cx8802_init_common(dev);
	if (0 != err)
		goto fail_free;

	/* dvb stuff */
	printk("%s/2: cx2388x based dvb card\n", core->name);
	videobuf_queue_init(&dev->dvb.dvbq, &dvb_qops,
			    dev->pci, &dev->slock,
			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
			    V4L2_FIELD_TOP,
			    sizeof(struct cx88_buffer),
			    dev);
	err = dvb_register(dev);
	if (0 != err)
		goto fail_fini;
	return 0;

 fail_fini:
	cx8802_fini_common(dev);
 fail_free:
	kfree(dev);
 fail_core:
	cx88_core_put(core,pci_dev);
	return err;
}

static void __devexit dvb_remove(struct pci_dev *pci_dev)
{
	struct cx8802_dev *dev = pci_get_drvdata(pci_dev);

	/* dvb */
	videobuf_dvb_unregister(&dev->dvb);

	/* common */
	cx8802_fini_common(dev);
	cx88_core_put(dev->core,dev->pci);
	kfree(dev);
}

static struct pci_device_id cx8802_pci_tbl[] = {
	{
		.vendor       = 0x14f1,
		.device       = 0x8802,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	},{
		/* --- end of list --- */
	}
};
Example #19
static int rt_pci_resume(struct pci_dev *pci_dev)
{
	struct net_device *net_dev = pci_get_drvdata(pci_dev);
	VOID *pAd = NULL;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
	INT32 retval;


	/*
		Set the power state of a PCI device

		PCI has 4 power states, D0 (normal) ~ D3 (less power)
		you can find that in include/linux/pci.h
		#define PCI_D0          ((pci_power_t __force) 0)
		#define PCI_D1          ((pci_power_t __force) 1)
		#define PCI_D2          ((pci_power_t __force) 2)
		#define PCI_D3hot       ((pci_power_t __force) 3)
		#define PCI_D3cold      ((pci_power_t __force) 4)
		#define PCI_UNKNOWN     ((pci_power_t __force) 5)
		#define PCI_POWER_ERROR ((pci_power_t __force) -1)
	*/
	retval = pci_set_power_state(pci_dev, PCI_D0);

	/* restore the saved state of a PCI device */
	pci_restore_state(pci_dev);

	/* initialize device before it's used by a driver */
	if (pci_enable_device(pci_dev))
	{
		printk("pci enable fail!\n");
		return 0;
	}
#endif

	DBGPRINT(RT_DEBUG_TRACE, ("===>%s()\n", __FUNCTION__));

	if (net_dev == NULL)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("net_dev == NULL!\n"));
	}
	else
		GET_PAD_FROM_NET_DEV(pAd, net_dev);

	if (pAd != NULL)
	{
		ULONG IfNum;

		/*
			we can not use IFF_UP because ra0 down but ra1 up
			and 1 suspend/resume function for 1 module, not for each interface
			so Linux will call suspend/resume function once
		*/
		RTMP_DRIVER_VIRTUAL_INF_NUM_GET(pAd, &IfNum);
		if (IfNum > 0)
/*		if (VIRTUAL_IF_NUM(pAd) > 0) */
		{
			/* mark device as attached from system and restart if needed */
			netif_device_attach(net_dev);

			if (rt28xx_open((PNET_DEV)net_dev) != 0)
			{
				/* open fail */
				DBGPRINT(RT_DEBUG_TRACE, ("<===%s()\n", __FUNCTION__));
				return 0;
			}

			/* increase MODULE use count */
			RT_MOD_INC_USE_COUNT();

/*			RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS); */
/*			RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF); */
			RTMP_DRIVER_PCI_RESUME(pAd);

			netif_start_queue(net_dev);
			netif_carrier_on(net_dev);
			netif_wake_queue(net_dev);
		}
	}

	DBGPRINT(RT_DEBUG_TRACE, ("<=== %s()\n", __FUNCTION__));
	return 0;
}
Example #20
static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct sja1000_priv *priv;
	struct peak_pci_chan *chan;
	struct net_device *dev, *prev_dev;
	void __iomem *cfg_base, *reg_base;
	u16 sub_sys_id, icr;
	int i, err, channels;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto failure_disable_pci;

	err = pci_read_config_word(pdev, 0x2e, &sub_sys_id);
	if (err)
		goto failure_release_regions;

	dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n",
		pdev->vendor, pdev->device, sub_sys_id);

	err = pci_write_config_word(pdev, 0x44, 0);
	if (err)
		goto failure_release_regions;

	if (sub_sys_id >= 12)
		channels = 4;
	else if (sub_sys_id >= 10)
		channels = 3;
	else if (sub_sys_id >= 4)
		channels = 2;
	else
		channels = 1;

	cfg_base = pci_iomap(pdev, 0, PEAK_PCI_CFG_SIZE);
	if (!cfg_base) {
		dev_err(&pdev->dev, "failed to map PCI resource #0\n");
		err = -ENOMEM;
		goto failure_release_regions;
	}

	reg_base = pci_iomap(pdev, 1, PEAK_PCI_CHAN_SIZE * channels);
	if (!reg_base) {
		dev_err(&pdev->dev, "failed to map PCI resource #1\n");
		err = -ENOMEM;
		goto failure_unmap_cfg_base;
	}

	/* Set GPIO control register */
	writew(0x0005, cfg_base + PITA_GPIOICR + 2);
	/* Enable all channels of this card */
	writeb(0x00, cfg_base + PITA_GPIOICR);
	/* Toggle reset */
	writeb(0x05, cfg_base + PITA_MISC + 3);
	mdelay(5);
	/* Leave parport mux mode */
	writeb(0x04, cfg_base + PITA_MISC + 3);

	icr = readw(cfg_base + PITA_ICR + 2);

	for (i = 0; i < channels; i++) {
		dev = alloc_sja1000dev(sizeof(struct peak_pci_chan));
		if (!dev) {
			err = -ENOMEM;
			goto failure_remove_channels;
		}

		priv = netdev_priv(dev);
		chan = priv->priv;

		chan->cfg_base = cfg_base;
		priv->reg_base = reg_base + i * PEAK_PCI_CHAN_SIZE;

		priv->read_reg = peak_pci_read_reg;
		priv->write_reg = peak_pci_write_reg;
		priv->post_irq = peak_pci_post_irq;

		priv->can.clock.freq = PEAK_PCI_CAN_CLOCK;
		priv->ocr = PEAK_PCI_OCR;
		priv->cdr = PEAK_PCI_CDR;
		/* Neither a slave nor a single device distributes the clock */
		if (channels == 1 || i > 0)
			priv->cdr |= CDR_CLK_OFF;

		/* Setup interrupt handling */
		priv->irq_flags = IRQF_SHARED;
		dev->irq = pdev->irq;

		chan->icr_mask = peak_pci_icr_masks[i];
		icr |= chan->icr_mask;

		SET_NETDEV_DEV(dev, &pdev->dev);
		dev->dev_id = i;

		/* Create chain of SJA1000 devices */
		chan->prev_dev = pci_get_drvdata(pdev);
		pci_set_drvdata(pdev, dev);

		/*
		 * PCAN-ExpressCard needs some additional i2c init.
		 * This must be done *before* register_sja1000dev() but
		 * *after* devices linkage
		 */
		if (pdev->device == PEAK_PCIEC_DEVICE_ID ||
		    pdev->device == PEAK_PCIEC34_DEVICE_ID) {
			err = peak_pciec_probe(pdev, dev);
			if (err) {
				dev_err(&pdev->dev,
					"failed to probe device (err %d)\n",
					err);
				goto failure_free_dev;
			}
		}

		err = register_sja1000dev(dev);
		if (err) {
			dev_err(&pdev->dev, "failed to register device\n");
			goto failure_free_dev;
		}

		dev_info(&pdev->dev,
			 "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
			 dev->name, priv->reg_base, chan->cfg_base, dev->irq);
	}

	/* Enable interrupts */
	writew(icr, cfg_base + PITA_ICR + 2);

	return 0;

failure_free_dev:
	pci_set_drvdata(pdev, chan->prev_dev);
	free_sja1000dev(dev);

failure_remove_channels:
	/* Disable interrupts */
	writew(0x0, cfg_base + PITA_ICR + 2);

	chan = NULL;
	for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
		priv = netdev_priv(dev);
		chan = priv->priv;
		prev_dev = chan->prev_dev;

		unregister_sja1000dev(dev);
		free_sja1000dev(dev);
	}

	/* free any PCIeC resources too */
	if (chan && chan->pciec_card)
		peak_pciec_remove(chan->pciec_card);

	pci_iounmap(pdev, reg_base);

failure_unmap_cfg_base:
	pci_iounmap(pdev, cfg_base);

failure_release_regions:
	pci_release_regions(pdev);

failure_disable_pci:
	pci_disable_device(pdev);

	return err;
}
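
Since each channel's net_device is chained through drvdata (chan->prev_dev in the probe above), the remove path can walk the same chain that the failure path unwinds. A sketch of that walk, reusing only helpers visible in the probe; it omits the PCAN-ExpressCard specific cleanup and is an illustration, not necessarily the driver's exact remove function:

static void peak_pci_remove_sketch(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);	/* channel registered last */
	struct sja1000_priv *priv = netdev_priv(dev);
	struct peak_pci_chan *chan = priv->priv;
	void __iomem *cfg_base = chan->cfg_base;
	/* channel 0 owns the start of the BAR1 mapping (dev_id was set to i in probe) */
	void __iomem *reg_base = priv->reg_base - dev->dev_id * PEAK_PCI_CHAN_SIZE;
	struct net_device *prev_dev;

	/* disable all channel interrupts before tearing anything down */
	writew(0x0, cfg_base + PITA_ICR + 2);

	/* walk the chain built in probe, newest channel first */
	while (dev) {
		priv = netdev_priv(dev);
		chan = priv->priv;
		prev_dev = chan->prev_dev;

		unregister_sja1000dev(dev);
		free_sja1000dev(dev);
		dev = prev_dev;
	}

	pci_iounmap(pdev, reg_base);
	pci_iounmap(pdev, cfg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}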
Example #21
static int __devinit cx18_probe(struct pci_dev *dev,
				const struct pci_device_id *pci_id)
{
	int retval = 0;
	int vbi_buf_size;
	u32 devtype;
	struct cx18 *cx;

	spin_lock(&cx18_cards_lock);

	/* Make sure we've got a place for this card */
	if (cx18_cards_active == CX18_MAX_CARDS) {
		printk(KERN_ERR "cx18:  Maximum number of cards detected (%d).\n",
			      cx18_cards_active);
		spin_unlock(&cx18_cards_lock);
		return -ENOMEM;
	}

	cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
	if (!cx) {
		spin_unlock(&cx18_cards_lock);
		return -ENOMEM;
	}
	cx18_cards[cx18_cards_active] = cx;
	cx->dev = dev;
	cx->num = cx18_cards_active++;
	snprintf(cx->name, sizeof(cx->name), "cx18-%d", cx->num);
	CX18_INFO("Initializing card #%d\n", cx->num);

	spin_unlock(&cx18_cards_lock);

	cx18_process_options(cx);
	if (cx->options.cardtype == -1) {
		retval = -ENODEV;
		goto err;
	}
	if (cx18_init_struct1(cx)) {
		retval = -ENOMEM;
		goto err;
	}

	CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);

	/* PCI Device Setup */
	retval = cx18_setup_pci(cx, dev, pci_id);
	if (retval != 0) {
		if (retval == -EIO)
			goto free_workqueue;
		else if (retval == -ENXIO)
			goto free_mem;
	}
	/* save cx in the pci struct for later use */
	pci_set_drvdata(dev, cx);

	/* map io memory */
	CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
		   cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
	cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
				       CX18_MEM_SIZE);
	if (!cx->enc_mem) {
		CX18_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
		CX18_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
		retval = -ENOMEM;
		goto free_mem;
	}
	cx->reg_mem = cx->enc_mem + CX18_REG_OFFSET;
	devtype = cx18_read_reg(cx, 0xC72028);
	switch (devtype & 0xff000000) {
	case 0xff000000:
		CX18_INFO("cx23418 revision %08x (A)\n", devtype);
		break;
	case 0x01000000:
		CX18_INFO("cx23418 revision %08x (B)\n", devtype);
		break;
	default:
		CX18_INFO("cx23418 revision %08x (Unknown)\n", devtype);
		break;
	}

	cx18_init_power(cx, 1);
	cx18_init_memory(cx);

	cx->scb = (struct cx18_scb __iomem *)(cx->enc_mem + SCB_OFFSET);
	cx18_init_scb(cx);

	cx18_gpio_init(cx);

	/* active i2c  */
	CX18_DEBUG_INFO("activating i2c...\n");
	if (init_cx18_i2c(cx)) {
		CX18_ERR("Could not initialize i2c\n");
		goto free_map;
	}

	CX18_DEBUG_INFO("Active card count: %d.\n", cx18_cards_active);

	if (cx->card->hw_all & CX18_HW_TVEEPROM) {
		/* Based on the model number the cardtype may be changed.
		   The PCI IDs are not always reliable. */
		cx18_process_eeprom(cx);
	}
	if (cx->card->comment)
		CX18_INFO("%s", cx->card->comment);
	if (cx->card->v4l2_capabilities == 0) {
		retval = -ENODEV;
		goto free_i2c;
	}
	cx18_init_memory(cx);

	/* Register IRQ */
	retval = request_irq(cx->dev->irq, cx18_irq_handler,
			     IRQF_SHARED | IRQF_DISABLED, cx->name, (void *)cx);
	if (retval) {
		CX18_ERR("Failed to register irq %d\n", retval);
		goto free_i2c;
	}

	if (cx->std == 0)
		cx->std = V4L2_STD_NTSC_M;

	if (cx->options.tuner == -1) {
		int i;

		for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) {
			if ((cx->std & cx->card->tuners[i].std) == 0)
				continue;
			cx->options.tuner = cx->card->tuners[i].tuner;
			break;
		}
	}
	/* if no tuner was found, then pick the first tuner in the card list */
	if (cx->options.tuner == -1 && cx->card->tuners[0].std) {
		cx->std = cx->card->tuners[0].std;
		if (cx->std & V4L2_STD_PAL)
			cx->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
		else if (cx->std & V4L2_STD_NTSC)
			cx->std = V4L2_STD_NTSC_M;
		else if (cx->std & V4L2_STD_SECAM)
			cx->std = V4L2_STD_SECAM_L;
		cx->options.tuner = cx->card->tuners[0].tuner;
	}
	if (cx->options.radio == -1)
		cx->options.radio = (cx->card->radio_input.audio_type != 0);

	/* The card is now fully identified, continue with card-specific
	   initialization. */
	cx18_init_struct2(cx);

	cx18_load_and_init_modules(cx);

	if (cx->std & V4L2_STD_525_60) {
		cx->is_60hz = 1;
		cx->is_out_60hz = 1;
	} else {
		cx->is_50hz = 1;
		cx->is_out_50hz = 1;
	}
	cx->params.video_gop_size = cx->is_60hz ? 15 : 12;

	cx->stream_buf_size[CX18_ENC_STREAM_TYPE_MPG] = 0x08000;
	cx->stream_buf_size[CX18_ENC_STREAM_TYPE_TS] = 0x08000;
	cx->stream_buf_size[CX18_ENC_STREAM_TYPE_PCM] = 0x01200;
	cx->stream_buf_size[CX18_ENC_STREAM_TYPE_YUV] = 0x20000;
	vbi_buf_size = cx->vbi.raw_size * (cx->is_60hz ? 24 : 36) / 2;
	cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = vbi_buf_size;

	if (cx->options.radio > 0)
		cx->v4l2_cap |= V4L2_CAP_RADIO;

	if (cx->options.tuner > -1) {
		struct tuner_setup setup;

		setup.addr = ADDR_UNSET;
		setup.type = cx->options.tuner;
		setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
		setup.tuner_callback = (setup.type == TUNER_XC2028) ?
			cx18_reset_tuner_gpio : NULL;
		cx18_call_i2c_clients(cx, TUNER_SET_TYPE_ADDR, &setup);
		if (setup.type == TUNER_XC2028) {
			static struct xc2028_ctrl ctrl = {
				.fname = XC2028_DEFAULT_FIRMWARE,
				.max_len = 64,
			};
			struct v4l2_priv_tun_config cfg = {
				.tuner = cx->options.tuner,
				.priv = &ctrl,
			};
			cx18_call_i2c_clients(cx, TUNER_SET_CONFIG, &cfg);
		}
	}

	/* The tuner is fixed to the standard. The other inputs (e.g. S-Video)
	   are not. */
	cx->tuner_std = cx->std;

	retval = cx18_streams_setup(cx);
	if (retval) {
		CX18_ERR("Error %d setting up streams\n", retval);
		goto free_irq;
	}
	retval = cx18_streams_register(cx);
	if (retval) {
		CX18_ERR("Error %d registering devices\n", retval);
		goto free_streams;
	}

	CX18_INFO("Initialized card #%d: %s\n", cx->num, cx->card_name);

	return 0;

free_streams:
	cx18_streams_cleanup(cx, 1);
free_irq:
	free_irq(cx->dev->irq, (void *)cx);
free_i2c:
	exit_cx18_i2c(cx);
free_map:
	cx18_iounmap(cx);
free_mem:
	release_mem_region(cx->base_addr, CX18_MEM_SIZE);
free_workqueue:
err:
	if (retval == 0)
		retval = -ENODEV;
	CX18_ERR("Error %d on initialization\n", retval);
	cx18_log_statistics(cx);

	kfree(cx18_cards[cx->num]);
	cx18_cards[cx->num] = NULL;
	return retval;
}

int cx18_init_on_first_open(struct cx18 *cx)
{
	int video_input;
	int fw_retry_count = 3;
	struct v4l2_frequency vf;
	struct cx18_open_id fh;

	fh.cx = cx;

	if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
		return -ENXIO;

	if (test_and_set_bit(CX18_F_I_INITED, &cx->i_flags))
		return 0;

	while (--fw_retry_count > 0) {
		/* load firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}
	set_bit(CX18_F_I_LOADED_FW, &cx->i_flags);

	/* Init the firmware twice to work around a silicon bug
	 * transport related. */

	fw_retry_count = 3;
	while (--fw_retry_count > 0) {
		/* load firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}

	vf.tuner = 0;
	vf.type = V4L2_TUNER_ANALOG_TV;
	vf.frequency = 6400; /* the tuner 'baseline' frequency */

	/* Set initial frequency. For PAL/SECAM broadcasts no
	   'default' channel exists AFAIK. */
	if (cx->std == V4L2_STD_NTSC_M_JP)
		vf.frequency = 1460;	/* ch. 1 91250*16/1000 */
	else if (cx->std & V4L2_STD_NTSC_M)
		vf.frequency = 1076;	/* ch. 4 67250*16/1000 */

	video_input = cx->active_input;
	cx->active_input++;	/* Force update of input */
	cx18_s_input(NULL, &fh, video_input);

	/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
	   in one place. */
	cx->std++;		/* Force full standard initialization */
	cx18_s_std(NULL, &fh, &cx->tuner_std);
	cx18_s_frequency(NULL, &fh, &vf);
	return 0;
}

static void cx18_remove(struct pci_dev *pci_dev)
{
	struct cx18 *cx = pci_get_drvdata(pci_dev);

	CX18_DEBUG_INFO("Removing Card #%d\n", cx->num);

	/* Stop all captures */
	CX18_DEBUG_INFO("Stopping all streams\n");
	if (atomic_read(&cx->tot_capturing) > 0)
		cx18_stop_all_captures(cx);

	/* Interrupts */
	cx18_sw1_irq_disable(cx, IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
	cx18_sw2_irq_disable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);

	cx18_halt_firmware(cx);

	cx18_streams_cleanup(cx, 1);

	exit_cx18_i2c(cx);

	free_irq(cx->dev->irq, (void *)cx);

	cx18_iounmap(cx);

	release_mem_region(cx->base_addr, CX18_MEM_SIZE);

	pci_disable_device(cx->dev);

	cx18_log_statistics(cx);
	CX18_INFO("Removed %s, card #%d\n", cx->card_name, cx->num);
}

/* define a pci_driver for card detection */
static struct pci_driver cx18_pci_driver = {
      .name =     "cx18",
      .id_table = cx18_pci_tbl,
      .probe =    cx18_probe,
      .remove =   cx18_remove,
};

static int module_start(void)
{
	printk(KERN_INFO "cx18:  Start initialization, version %s\n", CX18_VERSION);

	memset(cx18_cards, 0, sizeof(cx18_cards));

	/* Validate parameters */
	if (cx18_first_minor < 0 || cx18_first_minor >= CX18_MAX_CARDS) {
		printk(KERN_ERR "cx18:  Exiting, cx18_first_minor must be between 0 and %d\n",
		     CX18_MAX_CARDS - 1);
		return -1;
	}

	if (cx18_debug < 0 || cx18_debug > 511) {
		cx18_debug = 0;
		printk(KERN_INFO "cx18:   Debug value must be >= 0 and <= 511!\n");
	}

	if (pci_register_driver(&cx18_pci_driver)) {
		printk(KERN_ERR "cx18:   Error detecting PCI card\n");
		return -ENODEV;
	}
	printk(KERN_INFO "cx18:  End initialization\n");
	return 0;
}

static void module_cleanup(void)
{
	int i;

	pci_unregister_driver(&cx18_pci_driver);

	for (i = 0; i < cx18_cards_active; i++) {
		if (cx18_cards[i] == NULL)
			continue;
		kfree(cx18_cards[i]);
	}
}
Example #22
static void mcb_pci_remove(struct pci_dev *pdev)
{
	struct priv *priv = pci_get_drvdata(pdev);

	mcb_release_bus(priv->bus);
}
Example #23
static int ast_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *ddev = pci_get_drvdata(pdev);
	return ast_drm_thaw(ddev);
}
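
Unlike the legacy .suspend/.resume callbacks in the earlier examples, ast_pm_thaw() takes a struct device * because it is meant to be referenced from a dev_pm_ops table hung off the PCI driver. A sketch of that wiring (the companion callback names and the id-table symbol are assumptions):

static const struct dev_pm_ops ast_pm_ops = {
	.suspend	= ast_pm_suspend,
	.resume		= ast_pm_resume,
	.freeze		= ast_pm_freeze,
	.thaw		= ast_pm_thaw,		/* the callback shown above */
	.poweroff	= ast_pm_poweroff,
	.restore	= ast_pm_resume,
};

static struct pci_driver ast_pci_driver = {
	.name		= "ast",
	.id_table	= pciidlist,		/* assumed id-table symbol */
	.probe		= ast_pci_probe,
	.remove		= ast_pci_remove,
	.driver.pm	= &ast_pm_ops,
};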
Example #24
static void snd_trident_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}
Example #25
/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_shutdown(struct pci_dev *pdev)
{
	struct usb_hcd *hcd = pci_get_drvdata(pdev);

	uhci_hc_died(hcd_to_uhci(hcd));
}
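
The shutdown hook above gets called because it is installed through the .shutdown member of struct pci_driver. A minimal sketch of that registration (the id-table and probe/remove symbols are placeholders, not the real uhci-hcd definitions):

static struct pci_driver uhci_pci_driver_sketch = {
	.name		= "uhci_hcd",
	.id_table	= uhci_pci_ids,		/* assumed id-table symbol */
	.probe		= uhci_pci_probe,	/* assumed probe/remove callbacks */
	.remove		= uhci_pci_remove,
	.shutdown	= uhci_shutdown,	/* quiesce the controller for reboot/kexec */
};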
Example #26
static void dwc3_haps_remove(struct pci_dev *pci)
{
	struct dwc3_haps *dwc = pci_get_drvdata(pci);

	platform_device_unregister(dwc->dwc3);
}
Example #27
static void __devexit snd_via82xx_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}
Example #28
/**
 * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
 * @dev: USB Host Controller being suspended
 * @state: state that the controller is going into
 *
 * Store this function in the HCD's struct pci_driver as suspend().
 */
int usb_hcd_pci_suspend (struct pci_dev *dev, u32 state)
{
    struct usb_hcd		*hcd;
    int			retval = 0;
    int			has_pci_pm;

    hcd = pci_get_drvdata(dev);

    /* even when the PCI layer rejects some of the PCI calls
     * below, HCs can try global suspend and reduce DMA traffic.
     * PM-sensitive HCDs may already have done this.
     */
    has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
    if (state > 4)
        state = 4;

    switch (hcd->state) {

    /* entry if root hub wasn't yet suspended ... from sysfs,
     * without autosuspend, or if USB_SUSPEND isn't configured.
     */
    case USB_STATE_RUNNING:
        hcd->state = USB_STATE_QUIESCING;
        retval = hcd->driver->suspend (hcd, state);
        if (retval) {
            dev_dbg (hcd->self.controller,
                     "suspend fail, retval %d\n",
                     retval);
            break;
        }
        hcd->state = HCD_STATE_SUSPENDED;
    /* FALLTHROUGH */

    /* entry with CONFIG_USB_SUSPEND, or hcds that autosuspend: the
     * controller and/or root hub will already have been suspended,
     * but it won't be ready for a PCI resume call.
     *
     * FIXME only CONFIG_USB_SUSPEND guarantees hub_suspend() will
     * have been called, otherwise root hub timers still run ...
     */
    case HCD_STATE_SUSPENDED:
        if (state <= dev->current_state)
            break;

        /* no DMA or IRQs except in D0 */
        if (!dev->current_state) {
            pci_save_state (dev);
            pci_disable_device (dev);
            free_irq (hcd->irq, hcd);
        }

        if (!has_pci_pm) {
            dev_dbg (hcd->self.controller, "--> PCI D0/legacy\n");
            break;
        }

        /* POLICY: ignore D1/D2/D3hot differences;
         * we know D3hot will always work.
         */
        retval = pci_set_power_state (dev, state);
        if (retval < 0 && state < 3) {
            retval = pci_set_power_state (dev, 3);
            if (retval == 0)
                state = 3;
        }
        if (retval == 0) {
            dev_dbg (hcd->self.controller, "--> PCI %s\n",
                     pci_state(dev->current_state));
#ifdef	CONFIG_USB_SUSPEND
            pci_enable_wake (dev, state, hcd->remote_wakeup);
            pci_enable_wake (dev, 4, hcd->remote_wakeup);
#endif
        } else if (retval < 0) {
            dev_dbg (&dev->dev, "PCI %s suspend fail, %d\n",
                     pci_state(state), retval);
            (void) usb_hcd_pci_resume (dev);
            break;
        }
        break;
    default:
        dev_dbg (hcd->self.controller, "hcd state %d; not suspended\n",
                 hcd->state);
        retval = -EINVAL;
        break;
    }

    /* update power_state **ONLY** to make sysfs happier */
    if (retval == 0)
        dev->dev.power.power_state = state;
    return retval;
}
Example #29
static void snd_card_emu10k1_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
}
Example #30
static void snd_via82xx_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
}