/*
 * Release the interrupt line previously requested for this net device.
 * Waits for any in-flight handler (on kernels >= 2.5) before freeing the
 * IRQ, then disables MSI on PCI/PCIe parts.  Always returns 0.
 */
int RtmpOSIRQRelease(IN PNET_DEV pNetDev)
{
	struct net_device *net_dev = pNetDev;
	PRTMP_ADAPTER pAd = NULL;

	/* Recover the adapter pointer stashed in the net_device. */
	GET_PAD_FROM_NET_DEV(pAd, net_dev);
	ASSERT(pAd);

#ifdef RTMP_PCI_SUPPORT
	if (pAd->infType == RTMP_DEV_INF_PCI || pAd->infType == RTMP_DEV_INF_PCIE) {
		POS_COOKIE pObj = (POS_COOKIE)(pAd->OS_Cookie);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		/* Make sure no handler is still running on this line. */
		synchronize_irq(pObj->pci_dev->irq);
#endif
		free_irq(pObj->pci_dev->irq, (net_dev));
		RTMP_MSI_DISABLE(pAd);
	}
#endif // RTMP_PCI_SUPPORT //

#ifdef RTMP_RBUS_SUPPORT
	if (pAd->infType == RTMP_DEV_INF_RBUS) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		synchronize_irq(net_dev->irq);
#endif
		free_irq(net_dev->irq, (net_dev));
	}
#endif // RTMP_RBUS_SUPPORT //

	return 0;
}
/*
 * Stop the touchscreen: disable controller interrupt generation, clear the
 * running flag so the ISR stops parsing reports, wait for any in-flight
 * ISR, then power the chip down via its enable GPIO (if present).
 * Returns 0 on success or the error from pixcir_int_enable().
 */
static int pixcir_stop(struct pixcir_i2c_ts_data *ts)
{
	int error;

	/* Disable interrupt generation */
	error = pixcir_int_enable(ts, false);
	if (error) {
		dev_err(&ts->client->dev,
			"Failed to disable interrupt generation: %d\n", error);
		return error;
	}

	/* Exit ISR if running, no more report parsing */
	ts->running = false;
	mb();	/* update status before we synchronize irq */

	/* Wait till running ISR is complete */
	synchronize_irq(ts->client->irq);

	if (ts->gpio_enable)
		gpiod_set_value_cansleep(ts->gpio_enable, 0);

	return 0;
}
/*
 * Mask device interrupts and wait for any handler already running on the
 * adapter's PCI line to complete.  irq_sem is incremented so nested
 * disable/enable pairs balance.
 */
static void hxge_irq_disable(struct hxge_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	hxge_disable_interrupts(adapter);
	synchronize_irq(adapter->pdev->irq);
}
/*
 * Module exit: quiesce and release the interrupt line, then report the
 * number of interrupts handled.
 * NOTE(review): free_irq() itself waits for running handlers, so the
 * explicit synchronize_irq() here is belt-and-braces, not required.
 */
static void __exit my_exit (void)
{
	synchronize_irq (irq);
	free_irq (irq, &my_dev_id);
	printk (KERN_INFO "Successfully unloading, irq_counter = %d\n",
		irq_counter);
}
/*
 * Set up an VME interrupt
 *
 * Enable or disable generation of the given VME IRQ level in the bridge's
 * LINT_EN register.  When disabling with sync != 0, waits for any handler
 * still running on the parent PCI device's interrupt line.
 *
 * NOTE(review): the function's closing brace is not visible in this chunk;
 * the definition continues past this excerpt.
 */
void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
		      int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
				    dev);
		synchronize_irq(pdev->irq);
	}
/*
 * power management
 */
/*
 * Suspend handler: run the chip's pre-power-down sequence, quiesce and
 * release the IRQ, then save PCI state and enter the requested sleep
 * state with wake enabled.  Returns 0.
 */
static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return 0;

	/* lock the device pointers */
	mutex_lock(&(dev->dev_mutex));

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S3);

	if (dev->irq >= 0) {
		/* let any in-flight handler finish before freeing the line */
		synchronize_irq(dev->irq);
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_disable_msi(pci);

	pci_save_state(pci);
	pci_enable_wake(pci, pci_choose_state(pci, state), 1);
	pci_disable_device(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));

	/* unlock the device pointers */
	mutex_unlock(&dev->dev_mutex);

	return 0;
}
/*
 * Shutdown hook: run the chip's pre-power-down sequence, then release
 * IRQ, MSI, and the PCI device.
 * NOTE(review): unlike the suspend path, no dev_mutex is taken here —
 * presumably acceptable at shutdown time; verify against driver locking.
 */
static void rtsx_shutdown(struct pci_dev *pci)
{
	struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	printk(KERN_INFO "Ready to shutdown\n");

	if (!dev) {
		printk(KERN_ERR "Invalid memory\n");
		return;
	}

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S1);

	if (dev->irq >= 0) {
		/* let any in-flight handler finish before freeing the line */
		synchronize_irq(dev->irq);
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_disable_msi(pci);

	pci_disable_device(pci);

	return;
}
static int snd_ad1889_free(struct snd_ad1889 *chip) { if (chip->irq < 0) goto skip_hw; spin_lock_irq(&chip->lock); ad1889_mute(chip); /* Turn off interrupt on count and zero DMA registers */ ad1889_channel_reset(chip, AD_CHAN_WAV | AD_CHAN_ADC); /* clear DISR. If we don't, we'd better jump off the Eiffel Tower */ ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PTAI | AD_DMA_DISR_PMAI); ad1889_readl(chip, AD_DMA_DISR); /* flush, dammit! */ spin_unlock_irq(&chip->lock); synchronize_irq(chip->irq); if (chip->irq >= 0) free_irq(chip->irq, chip); skip_hw: if (chip->iobase) iounmap(chip->iobase); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; }
/*
 * Shutdown hook (quiet variant): run the chip's pre-power-down sequence,
 * then release IRQ, MSI, and the PCI device.
 */
static void rtsx_shutdown(struct pci_dev *pci)
{
	struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return;

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S1);

	if (dev->irq >= 0) {
		/* let any in-flight handler finish before freeing the line */
		synchronize_irq(dev->irq);
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_disable_msi(pci);

	pci_disable_device(pci);

	return;
}
/*
 * Suspend: suspend all PCM streams and the model-specific hardware, mask
 * every interrupt source under the register lock, drain any running
 * handler and scheduled work, then power the PCI device down.  The
 * interrupt mask is preserved in chip->interrupt_mask for resume.
 */
int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct oxygen *chip = card->private_data;
	unsigned int i, saved_interrupt_mask;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);

	for (i = 0; i < PCM_COUNT; ++i)
		if (chip->streams[i])
			snd_pcm_suspend(chip->streams[i]);

	if (chip->model.suspend)
		chip->model.suspend(chip);

	/* mask all interrupt sources atomically w.r.t. the ISR ... */
	spin_lock_irq(&chip->reg_lock);
	saved_interrupt_mask = chip->interrupt_mask;
	chip->interrupt_mask = 0;
	oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
	spin_unlock_irq(&chip->reg_lock);

	/* ... then wait for any running handler and queued work to drain */
	synchronize_irq(chip->irq);
	flush_scheduled_work();
	chip->interrupt_mask = saved_interrupt_mask;

	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));
	return 0;
}
/*
 * Free the NM256 instance: stop running streams, wait for any in-flight
 * handler, unmap and release I/O regions, free the IRQ, then disable the
 * PCI device and free the chip struct.  Always returns 0.
 */
static int snd_nm256_free(nm256_t *chip)
{
	/* stop any active streams before tearing down */
	if (chip->streams[SNDRV_PCM_STREAM_PLAYBACK].running)
		snd_nm256_playback_stop(chip);
	if (chip->streams[SNDRV_PCM_STREAM_CAPTURE].running)
		snd_nm256_capture_stop(chip);

	if (chip->irq >= 0)
		synchronize_irq(chip->irq);

	if (chip->cport)
		iounmap(chip->cport);
	if (chip->buffer)
		iounmap(chip->buffer);
	if (chip->res_cport) {
		release_resource(chip->res_cport);
		kfree_nocheck(chip->res_cport);
	}
	if (chip->res_buffer) {
		release_resource(chip->res_buffer);
		kfree_nocheck(chip->res_buffer);
	}
	if (chip->irq >= 0)
		free_irq(chip->irq, (void*)chip);

	pci_disable_device(chip->pci);
	kfree(chip);
	return 0;
}
/*
 * Destroy a completion queue: move it from HW to SW ownership, wait for
 * the EQ vector's handler and all outstanding references to drain, then
 * remove it from the CQ table and release its table entry and CQN bit.
 */
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
	if (ret)
		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	/* Waiting interrupt process procedure carried out */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* wait for all interrupt processed */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
/*
 * Disable the RCFW command channel: wait for the vector's handler, stop
 * and kill the worker tasklet, free the IRQ if it was requested, unmap
 * the CMDQ/CREQ BARs, and release command-tracking state.  Logs an error
 * if any command bit is still pending at teardown.
 */
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	/* Make sure the HW channel is stopped! */
	synchronize_irq(rcfw->vector);
	tasklet_disable(&rcfw->worker);
	tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
	if (rcfw->cmdq_bar_reg_iomem)
		iounmap(rcfw->cmdq_bar_reg_iomem);
	rcfw->cmdq_bar_reg_iomem = NULL;

	if (rcfw->creq_bar_reg_iomem)
		iounmap(rcfw->creq_bar_reg_iomem);
	rcfw->creq_bar_reg_iomem = NULL;

	/* warn if any command is still outstanding */
	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);

	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}
/*
 * Close the hypercall device: wait for any in-flight interrupt handler,
 * then release the IRQ line.  Always returns 0.
 */
static int hypercall_close(struct hypercall_dev* dev)
{
	//hypercall_thread_stop(dev);
	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);
	return 0;
}
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables
 * are nested. This functions waits for any pending IRQ
 * handlers for this interrupt to complete before returning.
 * If you use this function while holding a resource the IRQ
 * handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	/* only wait if a handler is actually installed on this line */
	if (desc->action)
		synchronize_irq(irq);
}
// shutdown device hardware void dev_down() { write_IntrMask(0); synchronize_irq(); dev_on = false; };
/*
 * Stop the touchscreen controller: clear its enable bit, wait for any
 * running ISR, cancel the timer, and gate the clock.
 */
static void tsc_stop(struct input_dev *dev)
{
	struct tsc_data *ts = input_get_drvdata(dev);

	tsc_clr_bits(ts, tscm, TSC_EN);
	synchronize_irq(ts->tsc_irq);
	del_timer_sync(&ts->timer);
	clk_disable(ts->clk);
}
/*
 * Module exit: print the mutex count, quiesce and release the interrupt
 * line, then report the number of interrupts handled.
 * NOTE(review): free_irq() already waits for running handlers; the
 * explicit synchronize_irq() is redundant but harmless.
 */
static void __exit my_exit (void)
{
	printk (KERN_INFO "\nExiting with mutex having count=%d:\n",
		atomic_read (&my_mutex.count));
	synchronize_irq (irq);
	free_irq (irq, &my_dev_id);
	printk (KERN_INFO "Successfully unloading, irq_counter = %d\n",
		irq_counter);
}
/*
 * Wait for every allocated MSI-X vector's in-flight handler to complete.
 * No-op when MSI-X entries were never set up.
 */
static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int vec;

	if (!xhci->msix_entries)
		return;

	for (vec = 0; vec < xhci->msix_count; vec++)
		synchronize_irq(xhci->msix_entries[vec].vector);
}
/*
 * constructor for chip instance
 *
 * Enables the PCI device, allocates and initializes the chip struct,
 * claims I/O regions, maps BAR 0, requests the shared IRQ, and registers
 * the chip as a low-level sound device.  On any failure, everything
 * acquired so far is released (snd_atiixp_free handles partial state).
 * Returns 0 and stores the chip in *r_chip on success, negative errno
 * otherwise.
 */
static int snd_atiixp_create(struct snd_card *card, struct pci_dev *pci,
			     struct atiixp **r_chip)
{
	static struct snd_device_ops ops = {
		.dev_free = snd_atiixp_dev_free,
	};
	struct atiixp *chip;
	int err;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	spin_lock_init(&chip->reg_lock);
	mutex_init(&chip->open_mutex);
	chip->card = card;
	chip->pci = pci;
	/* irq = -1 marks "not requested yet" for the error-cleanup path */
	chip->irq = -1;
	if ((err = pci_request_regions(pci, "ATI IXP AC97")) < 0) {
		pci_disable_device(pci);
		kfree(chip);
		return err;
	}
	chip->addr = pci_resource_start(pci, 0);
	chip->remap_addr = pci_ioremap_bar(pci, 0);
	if (chip->remap_addr == NULL) {
		snd_printk(KERN_ERR "AC'97 space ioremap problem\n");
		snd_atiixp_free(chip);
		return -EIO;
	}

	if (request_irq(pci->irq, snd_atiixp_interrupt, IRQF_SHARED,
			KBUILD_MODNAME, chip)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_atiixp_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;
	pci_set_master(pci);
	/* make sure no spurious handler from before setup is still running */
	synchronize_irq(chip->irq);

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_atiixp_free(chip);
		return err;
	}

	snd_card_set_dev(card, &pci->dev);

	*r_chip = chip;
	return 0;
}
/*
 * Register the X3 proto board's GPIO chip and wire each baseboard GPIO to
 * a freshly created IRQ, all funneled through the ILSEL_KEY chained
 * handler.  On failure, IRQs created so far are destroyed, the GPIO chip
 * is removed, the ILSEL line is quiesced and disabled.
 * Returns 0 on success, negative errno otherwise.
 */
int __init x3proto_gpio_setup(void)
{
	int ilsel;
	int ret, i;

	ilsel = ilsel_enable(ILSEL_KEY);
	if (unlikely(ilsel < 0))
		return ilsel;

	ret = gpiochip_add(&x3proto_gpio_chip);
	if (unlikely(ret))
		goto err_gpio;

	for (i = 0; i < NR_BASEBOARD_GPIOS; i++) {
		unsigned long flags;
		int irq = create_irq();

		if (unlikely(irq < 0)) {
			ret = -EINVAL;
			goto err_irq;
		}

		/* publish the mapping under the lock before wiring the chip */
		spin_lock_irqsave(&x3proto_gpio_lock, flags);
		x3proto_gpio_irq_map[i] = irq;
		irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
					      handle_simple_irq, "gpio");
		spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
	}

	pr_info("registering '%s' support, handling GPIOs %u -> %u, "
		"bound to IRQ %u\n",
		x3proto_gpio_chip.label, x3proto_gpio_chip.base,
		x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio, ilsel);

	irq_set_chained_handler(ilsel, x3proto_gpio_irq_handler);
	irq_set_irq_wake(ilsel, 1);

	return 0;

err_irq:
	/* unwind: i is the failed index (its map slot is still 0/unset) */
	for (; i >= 0; --i)
		if (x3proto_gpio_irq_map[i])
			destroy_irq(x3proto_gpio_irq_map[i]);

	ret = gpiochip_remove(&x3proto_gpio_chip);
	if (unlikely(ret))
		pr_err("Failed deregistering GPIO\n");

err_gpio:
	/* drain any handler on the ILSEL line before disabling it */
	synchronize_irq(ilsel);

	ilsel_disable(ILSEL_KEY);

	return ret;
}
/*
 * constructor for chip instance
 *
 * Older-API variant (2.6-era: __devinit, SA_* flags, init_MUTEX,
 * ioremap_nocache) of the ATI IXP constructor.  Enables the PCI device,
 * allocates the chip, claims regions, maps BAR 0, requests the IRQ, and
 * registers the low-level device.  Returns 0 and stores the chip in
 * *r_chip on success, negative errno otherwise.
 */
static int __devinit snd_atiixp_create(snd_card_t *card, struct pci_dev *pci,
				       atiixp_t **r_chip)
{
	static snd_device_ops_t ops = {
		.dev_free = snd_atiixp_dev_free,
	};
	atiixp_t *chip;
	int err;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kcalloc(1, sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	spin_lock_init(&chip->reg_lock);
	init_MUTEX(&chip->open_mutex);
	chip->card = card;
	chip->pci = pci;
	/* irq = -1 marks "not requested yet" for the error-cleanup path */
	chip->irq = -1;
	if ((err = pci_request_regions(pci, "ATI IXP AC97")) < 0) {
		pci_disable_device(pci);
		kfree(chip);
		return err;
	}
	chip->addr = pci_resource_start(pci, 0);
	chip->remap_addr = ioremap_nocache(chip->addr,
					   pci_resource_len(pci, 0));
	if (chip->remap_addr == NULL) {
		snd_printk(KERN_ERR "AC'97 space ioremap problem\n");
		snd_atiixp_free(chip);
		return -EIO;
	}

	if (request_irq(pci->irq, snd_atiixp_interrupt, SA_INTERRUPT|SA_SHIRQ,
			card->shortname, (void *)chip)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_atiixp_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;
	pci_set_master(pci);
	/* make sure no spurious handler from before setup is still running */
	synchronize_irq(chip->irq);

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_atiixp_free(chip);
		return err;
	}

	snd_card_set_dev(card, &pci->dev);

	*r_chip = chip;
	return 0;
}
/*
 * Take the SGMII link down: mask the PHY's interrupt sources, then wait
 * for any handler already running on its line.  Always returns 0.
 */
static int emac_sgmii_link_down(struct emac_adapter *adpt)
{
	struct emac_sgmii *sgmii = &adpt->phy;

	/* Disable interrupts */
	writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
	synchronize_irq(sgmii->irq);

	return 0;
}
/*
 * Module exit: quiesce and release the interrupt, free the buffer, then
 * tear down the char device region and the net device.
 * NOTE(review): the IRQ number is hard-coded to 19 — presumably matching
 * the request side; confirm against the init path.
 */
static void niitm_exit (void)
{
	synchronize_irq(19);
	free_irq(19, (void *)&d_irq);
	kfree(my_buf);
	cdev_del(my_cdev);
	unregister_chrdev_region(first_node, 1);
	unregister_netdev(devnet);
	free_netdev(devnet);
}
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc;

	/* Fix: validate irq BEFORE forming irq_desc + irq.  The old code
	 * computed the pointer first; pointer arithmetic past the end of
	 * the irq_desc array is undefined behavior even if never
	 * dereferenced. */
	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
/*
 * HIF stop for the AHB bus: disable the device interrupt, wait for any
 * handler already running on the AHB line, then flush pending work via
 * the shared PCI-path flush.
 */
static void ath10k_ahb_hif_stop(struct ath10k *ar)
{
	struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n");

	ath10k_ahb_irq_disable(ar);
	synchronize_irq(ar_ahb->irq);

	ath10k_pci_flush(ar);
}
/*
 * Suspend: mask the device's interrupt control register, read it back to
 * force the posted write out, then wait for any running handler.
 * Always returns 0.
 */
static int phantom_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct phantom_device *dev = pci_get_drvdata(pdev);

	iowrite32(0, dev->caddr + PHN_IRQCTL);
	ioread32(dev->caddr + PHN_IRQCTL); /* read-back flushes the write */
	synchronize_irq(pdev->irq);

	return 0;
}
/*
 * Shut down a uart port: wake anyone sleeping on modem-status changes,
 * invoke the low-level driver's shutdown op, then wait for any handler
 * still running on the port's IRQ.
 */
static void uart_port_shutdown(struct tty_port *port)
{
	struct uart_state *state = container_of(port, struct uart_state, port);
	struct uart_port *uport = state->uart_port;

	wake_up_interruptible(&port->delta_msr_wait);

	uport->ops->shutdown(uport);

	synchronize_irq(uport->irq);
}
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* irq_to_desc() returns NULL for invalid irq numbers */
	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
/*
 * Unmap a virq from the i8259 host: mask it in hardware, detach its chip
 * and handler, then wait for any in-flight handler to finish.
 */
static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
{
	/* Make sure irq is masked in hardware */
	i8259_mask_irq(virq);

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);
}