/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
        int max_count = 5 * HZ;

        if (atomic_read(lockp) < 0) {
                printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n",
                       atomic_read(lockp), file, line);
                return;
        }
        while (atomic_read(lockp) > 0) {
                if (max_count == 0) {
                        snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n",
                                   atomic_read(lockp), file, line);
                        break;
                }
                schedule_timeout_uninterruptible(1);
                max_count--;
        }
}
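/*
 * Usage sketch: the file/line parameters suggest that callers reach the
 * helper above through a wrapper macro that captures the call site, along
 * the lines of the snd_use_lock_sync() macro in the ALSA sequencer
 * headers. A minimal sketch, assuming that macro shape:
 */
#define snd_use_lock_sync(lockp) \
        snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)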
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt;

        while (1) {
                ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
                if (ctxt)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        ctxt->xprt = xprt;
        INIT_LIST_HEAD(&ctxt->dto_q);
        ctxt->count = 0;
        ctxt->frmr = NULL;
        atomic_inc(&xprt->sc_ctxt_used);
        return ctxt;
}
/*
 * Set current time and date in RTC
 */
static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
{
        struct wm8350 *wm8350 = dev_get_drvdata(dev);
        u16 time[4];
        u16 rtc_ctrl;
        int ret, retries = WM8350_SET_TIME_RETRIES;

        time[0] = tm->tm_sec;
        time[0] |= tm->tm_min << WM8350_RTC_MINS_SHIFT;
        time[1] = tm->tm_hour;
        time[1] |= (tm->tm_wday + 1) << WM8350_RTC_DAY_SHIFT;
        time[2] = tm->tm_mday;
        time[2] |= (tm->tm_mon + 1) << WM8350_RTC_MTH_SHIFT;
        time[3] = ((tm->tm_year + 1900) / 100) << WM8350_RTC_YHUNDREDS_SHIFT;
        time[3] |= (tm->tm_year + 1900) % 100;

        dev_dbg(dev, "Setting: %04x %04x %04x %04x\n",
                time[0], time[1], time[2], time[3]);

        /* Set RTC_SET to stop the clock */
        ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_SET);
        if (ret < 0)
                return ret;

        /* Wait until confirmation of stopping */
        do {
                rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
                schedule_timeout_uninterruptible(msecs_to_jiffies(1));
        } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));

        if (!retries) {
                dev_err(dev, "timed out on set confirmation\n");
                return -EIO;
        }

        /* Write time to RTC */
        ret = wm8350_block_write(wm8350, WM8350_RTC_SECONDS_MINUTES, 4, time);
        if (ret < 0)
                return ret;

        /* Clear RTC_SET to start the clock */
        ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_SET);
        return ret;
}
static int ams_i2c_cmd(enum ams_i2c_cmd cmd)
{
        s32 result;
        int count = 3;

        ams_i2c_write(AMS_COMMAND, cmd);
        msleep(5);

        while (count--) {
                result = ams_i2c_read(AMS_COMMAND);
                if (result == 0 || result & 0x80)
                        return 0;
                schedule_timeout_uninterruptible(HZ / 20);
        }
        return -1;
}
/* prcmu resout1 pin is used for CG2900 reset */
void dcg2900_u5500_enable_chip(struct cg2900_chip_dev *dev)
{
        struct dcg2900_info *info = dev->b_data;

        clk_enable(info->lpoclk);
        /*
         * Due to a bug in CG2900 we cannot just set GPIO high to enable
         * the chip. We must wait more than 100 msecs before enabling the
         * chip.
         * - Set PDB to low.
         * - Wait for 100 msecs
         * - Set PDB to high.
         */
        prcmu_resetout(1, 0);
        schedule_timeout_uninterruptible(
                        msecs_to_jiffies(CHIP_ENABLE_PDB_LOW_TIMEOUT));
        prcmu_resetout(1, 1);
}
unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
                              int adcsync)
{
        unsigned int val;

        if (adcsync)
                adc_channel |= UCB_ADC_SYNC_ENA;

        ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel);
        ucb1400_reg_write(ac97, UCB_ADC_CR,
                          UCB_ADC_ENA | adc_channel | UCB_ADC_START);

        while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA))
                 & UCB_ADC_DAT_VALID))
                schedule_timeout_uninterruptible(1);

        return val & UCB_ADC_DAT_MASK;
}
int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        int status;
        int state;

        /* Block on any pending expire */
        if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
                return 0;

        if (rcu_walk)
                return -ECHILD;

retry:
        spin_lock(&sbi->fs_lock);
        state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
        if (state == AUTOFS_INF_WANT_EXPIRE) {
                spin_unlock(&sbi->fs_lock);
                /*
                 * Possibly being selected for expire, wait until
                 * it's selected or not.
                 */
                schedule_timeout_uninterruptible(HZ/10);
                goto retry;
        }
        if (state & AUTOFS_INF_EXPIRING) {
                spin_unlock(&sbi->fs_lock);

                pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);

                status = autofs4_wait(sbi, dentry, NFY_NONE);
                wait_for_completion(&ino->expire_complete);

                pr_debug("expire done status=%d\n", status);

                if (d_unhashed(dentry))
                        return -EAGAIN;

                return status;
        }
        spin_unlock(&sbi->fs_lock);

        return 0;
}
/*
 * Wait for the host to return its start-up acknowledgement
 * sequence. This wait is too long for us to perform
 * "busy-waiting", and so we must sleep. This in turn means
 * that we must not be holding any spinlocks when we call
 * this function.
 */
static int host_startup_ack(struct soundscape *s, unsigned timeout)
{
        while (timeout != 0) {
                unsigned long flags;
                unsigned char x;

                schedule_timeout_uninterruptible(1);

                spin_lock_irqsave(&s->lock, flags);
                x = inb(HOST_DATA_IO(s->io_base));
                spin_unlock_irqrestore(&s->lock, flags);
                if (x == 0xfe)
                        return 1;

                --timeout;
        } /* while */

        return 0;
}
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
        unsigned long flags;
        struct snd_seq_event_cell *ptr;
        int max_count = 5 * HZ;

        if (snd_BUG_ON(!pool))
                return -EINVAL;

        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 1;
        spin_unlock_irqrestore(&pool->lock, flags);

        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);

        while (atomic_read(&pool->counter) > 0) {
                if (max_count == 0) {
                        snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n",
                                   atomic_read(&pool->counter));
                        break;
                }
                schedule_timeout_uninterruptible(1);
                max_count--;
        }

        spin_lock_irqsave(&pool->lock, flags);
        ptr = pool->ptr;
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        spin_unlock_irqrestore(&pool->lock, flags);

        vfree(ptr);

        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 0;
        spin_unlock_irqrestore(&pool->lock, flags);

        return 0;
}
static int snd_ali_stimer_ready(struct snd_ali *codec)
{
        unsigned long end_time;
        unsigned long dwChk1, dwChk2;

        dwChk1 = snd_ali_5451_peek(codec, ALI_STIMER);
        end_time = jiffies + msecs_to_jiffies(250);

        for (;;) {
                dwChk2 = snd_ali_5451_peek(codec, ALI_STIMER);
                if (dwChk2 != dwChk1)
                        return 0;
                if (!time_after_eq(end_time, jiffies))
                        break;
                schedule_timeout_uninterruptible(1);
        }

        dev_err(codec->card->dev, "ali_stimer_read: stimer is not ready.\n");
        return -EIO;
}
static unsigned int ucb1400_adc_read(struct ucb1400 *ucb, u16 adc_channel)
{
        unsigned int val;

        if (ucb->adcsync)
                adc_channel |= UCB_ADC_SYNC_ENA;

        ucb1400_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | adc_channel);
        ucb1400_reg_write(ucb, UCB_ADC_CR,
                          UCB_ADC_ENA | adc_channel | UCB_ADC_START);

        for (;;) {
                val = ucb1400_reg_read(ucb, UCB_ADC_DATA);
                if (val & UCB_ADC_DAT_VALID)
                        break;
                /* yield to other processes */
                schedule_timeout_uninterruptible(1);
        }

        return UCB_ADC_DAT_VALUE(val);
}
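/*
 * Both ucb1400_adc_read() variants above poll with no upper bound, so a
 * conversion that never completes would hang the caller. A minimal sketch
 * of the same loop with a deadline added -- the helper name and the 250 ms
 * budget are illustrative assumptions, not part of the driver:
 */
static int ucb1400_adc_read_timeout(struct ucb1400 *ucb, u16 adc_channel,
                                    unsigned int *result)
{
        /* assumed budget: generous compared to a normal conversion */
        unsigned long deadline = jiffies + msecs_to_jiffies(250);
        unsigned int val;

        if (ucb->adcsync)
                adc_channel |= UCB_ADC_SYNC_ENA;

        ucb1400_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | adc_channel);
        ucb1400_reg_write(ucb, UCB_ADC_CR,
                          UCB_ADC_ENA | adc_channel | UCB_ADC_START);

        for (;;) {
                val = ucb1400_reg_read(ucb, UCB_ADC_DATA);
                if (val & UCB_ADC_DAT_VALID)
                        break;
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT; /* give up instead of spinning forever */
                schedule_timeout_uninterruptible(1); /* sleep one tick */
        }

        *result = UCB_ADC_DAT_VALUE(val);
        return 0;
}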
static int tenxpress_phy_init(struct efx_nic *efx)
{
        int rc;

        falcon_board(efx)->type->init_phy(efx);

        if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
                if (efx->phy_type == PHY_TYPE_SFT9001A) {
                        int reg;

                        reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
                                            PMA_PMD_XCONTROL_REG);
                        reg |= (1 << PMA_PMD_EXT_SSR_LBN);
                        efx_mdio_write(efx, MDIO_MMD_PMAPMD,
                                       PMA_PMD_XCONTROL_REG, reg);
                        mdelay(200);
                }

                rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
                if (rc < 0)
                        return rc;

                rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
                if (rc < 0)
                        return rc;
        }

        rc = tenxpress_init(efx);
        if (rc < 0)
                return rc;

        /* Reinitialise flow control settings */
        efx_link_set_wanted_fc(efx, efx->wanted_fc);
        efx_mdio_an_reconfigure(efx);

        schedule_timeout_uninterruptible(HZ / 5); /* 200ms */

        /* Let XGXS and SerDes out of reset */
        falcon_reset_xaui(efx);

        return 0;
}
/*
 * Prepare controller for a transaction and call omap_i2c_xfer_msg
 * to do the work during IRQ processing.
 */
static int
omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
        struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
        int i;
        int r;
        u16 val;

        omap_i2c_unidle(dev);

        r = omap_i2c_wait_for_bb(dev);
        /* DCY: this seems to prevent lockup with multi-master systems */
        schedule_timeout_uninterruptible(5);

        /* On timeout, check again after a soft reset of the I2C block */
        if (WARN_ON(r == -ETIMEDOUT)) {
                /* Provide a permanent clock to recover the peripheral */
                val = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
                val |= (OMAP_I2C_SYSTEST_ST_EN |
                        OMAP_I2C_SYSTEST_FREE |
                        (2 << OMAP_I2C_SYSTEST_TMODE_SHIFT));
                omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, val);
                msleep(1);
                omap_i2c_init(dev);
                r = omap_i2c_wait_for_bb(dev);
        }

        if (r < 0)
                goto out;

        for (i = 0; i < num; i++) {
                r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
                if (r != 0)
                        break;
        }

        if (r == 0)
                r = num;

        omap_i2c_wait_for_bb(dev);
out:
        omap_i2c_idle(dev);
        return r;
}
static int snd_ali_codec_ready(struct snd_ali *codec, unsigned int port)
{
        unsigned long end_time;
        unsigned int res;

        end_time = jiffies + msecs_to_jiffies(250);

        for (;;) {
                res = snd_ali_5451_peek(codec, port);
                if (!(res & 0x8000))
                        return 0;
                if (!time_after_eq(end_time, jiffies))
                        break;
                schedule_timeout_uninterruptible(1);
        }

        snd_ali_5451_poke(codec, port, res & ~0x8000);
        dev_dbg(codec->card->dev, "ali_codec_ready: codec is not ready.\n");
        return -EIO;
}
static int falcon_spi_wait(struct efx_nic *efx)
{
        /* Allow roughly 100 ms in total */
        unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
        int i;

        /* Most operations finish quickly, so busy-poll briefly first */
        for (i = 0; i < 10; i++) {
                if (!falcon_spi_poll(efx))
                        return 0;
                udelay(10);
        }

        /* Fall back to sleeping one tick between polls until the deadline */
        for (;;) {
                if (!falcon_spi_poll(efx))
                        return 0;
                if (time_after_eq(jiffies, timeout)) {
                        netif_err(efx, hw, efx->net_dev,
                                  "timed out waiting for SPI\n");
                        return -ETIMEDOUT;
                }
                schedule_timeout_uninterruptible(1);
        }
}
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
        int retry = 100, pending_pkts = 0;
        int idx;

        do {
                pending_pkts = 0;

                for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(idx)))
                                continue;
                        pending_pkts +=
                                atomic_read(&oct->droq[idx]->pkts_pending);
                }

                if (pending_pkts > 0)
                        schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pending_pkts;
}
/* Zeroes out the SRAM contents. This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
        efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
        int count;

        /* Set the SRAM wake/sleep GPIO appropriately. */
        efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
        EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
        EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
        efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

        /* Initiate SRAM reset */
        EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
                             FRF_AZ_SRM_INIT_EN, 1,
                             FRF_AZ_SRM_NB_SZ, 0);
        efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

        /* Wait for SRAM reset to complete */
        count = 0;
        do {
                netif_dbg(efx, hw, efx->net_dev,
                          "waiting for SRAM reset (attempt %d)...\n", count);

                /* SRAM reset is slow; expect around 16ms */
                schedule_timeout_uninterruptible(HZ / 50);

                /* Check for reset complete */
                efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
                if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
                        netif_dbg(efx, hw, efx->net_dev,
                                  "SRAM reset complete\n");
                        return 0;
                }
        } while (++count < 20); /* wait up to 0.4 sec */

        netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
        return -ETIMEDOUT;
}
/*
 * Reset the chip using run bit, also lock PLL using ILRCK and
 * put back AES3INPUT. This workaround is described in latest
 * CS8427 datasheet, otherwise TXDSERIAL will not work.
 */
static void snd_cs8427_reset(struct snd_i2c_device *cs8427)
{
        struct cs8427 *chip;
        unsigned long end_time;
        int data, aes3input = 0;

        if (snd_BUG_ON(!cs8427))
                return;
        chip = cs8427->private_data;
        snd_i2c_lock(cs8427->bus);
        if ((chip->regmap[CS8427_REG_CLOCKSOURCE] & CS8427_RXDAES3INPUT) ==
            CS8427_RXDAES3INPUT) /* AES3 bit is set */
                aes3input = 1;
        chip->regmap[CS8427_REG_CLOCKSOURCE] &= ~(CS8427_RUN | CS8427_RXDMASK);
        snd_cs8427_reg_write(cs8427, CS8427_REG_CLOCKSOURCE,
                             chip->regmap[CS8427_REG_CLOCKSOURCE]);
        udelay(200);
        chip->regmap[CS8427_REG_CLOCKSOURCE] |= CS8427_RUN | CS8427_RXDILRCK;
        snd_cs8427_reg_write(cs8427, CS8427_REG_CLOCKSOURCE,
                             chip->regmap[CS8427_REG_CLOCKSOURCE]);
        udelay(200);
        snd_i2c_unlock(cs8427->bus);
        end_time = jiffies + chip->reset_timeout;
        while (time_after_eq(end_time, jiffies)) {
                snd_i2c_lock(cs8427->bus);
                data = snd_cs8427_reg_read(cs8427, CS8427_REG_RECVERRORS);
                snd_i2c_unlock(cs8427->bus);
                if (!(data & CS8427_UNLOCK))
                        break;
                schedule_timeout_uninterruptible(1);
        }
        snd_i2c_lock(cs8427->bus);
        chip->regmap[CS8427_REG_CLOCKSOURCE] &= ~CS8427_RXDMASK;
        if (aes3input)
                chip->regmap[CS8427_REG_CLOCKSOURCE] |= CS8427_RXDAES3INPUT;
        snd_cs8427_reg_write(cs8427, CS8427_REG_CLOCKSOURCE,
                             chip->regmap[CS8427_REG_CLOCKSOURCE]);
        snd_i2c_unlock(cs8427->bus);
}
static void try_to_suspend(struct work_struct *work)
{
        unsigned int initial_count, final_count;

        if (!pm_get_wakeup_count(&initial_count, true))
                goto out;

        mutex_lock(&autosleep_lock);

        if (!pm_save_wakeup_count(initial_count) ||
            system_state != SYSTEM_RUNNING) {
                mutex_unlock(&autosleep_lock);
                goto out;
        }

        if (autosleep_state == PM_SUSPEND_ON) {
                mutex_unlock(&autosleep_lock);
                return;
        }
        if (autosleep_state >= PM_SUSPEND_MAX)
                hibernate();
        else
                pm_suspend(autosleep_state);

        mutex_unlock(&autosleep_lock);

        if (!pm_get_wakeup_count(&final_count, false))
                goto out;

        /*
         * If the wakeup occurred for an unknown reason, wait to prevent the
         * system from trying to suspend and waking up in a tight loop.
         */
        if (final_count == initial_count)
                schedule_timeout_uninterruptible(HZ / 2);

out:
        queue_up_suspend_work();
}
static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
{
        int ret;
        int retries = WM8350_SET_ALM_RETRIES;
        u16 rtc_ctrl;

        ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
                                WM8350_RTC_ALMSET);
        if (ret < 0)
                return ret;

        /* Wait until confirmation */
        do {
                rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
                schedule_timeout_uninterruptible(msecs_to_jiffies(1));
        } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);

        if (rtc_ctrl & WM8350_RTC_ALMSTS)
                return -ETIMEDOUT;

        return 0;
}
/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
                               struct efx_self_tests *tests)
{
        struct efx_channel *channel;

        EFX_LOG(efx, "testing interrupts\n");
        tests->interrupt = -1;

        /* Reset interrupt flag */
        efx->last_irq_cpu = -1;
        smp_wmb();

        /* ACK each interrupting event queue. Receiving an interrupt due to
         * traffic before a test event is raised is considered a pass */
        efx_for_each_channel(channel, efx) {
                if (channel->work_pending)
                        efx_process_channel_now(channel);
                if (efx->last_irq_cpu >= 0)
                        goto success;
        }

        efx_nic_generate_interrupt(efx);

        /* Wait for arrival of test interrupt. */
        EFX_LOG(efx, "waiting for test interrupt\n");
        schedule_timeout_uninterruptible(HZ / 10);
        if (efx->last_irq_cpu >= 0)
                goto success;

        EFX_ERR(efx, "timed out waiting for interrupt\n");
        return -ETIMEDOUT;

success:
        EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
                efx->last_irq_cpu);
        tests->interrupt = 1;
        return 0;
}
static int prism54_bring_down(islpci_private *priv)
{
        void __iomem *device_base = priv->device_base;
        u32 reg;

        /* we are going to shut down the device */
        islpci_set_state(priv, PRV_STATE_PREBOOT);

        /* disable all device interrupts in case they weren't */
        isl38xx_disable_interrupts(priv->device_base);

        /* For safety reasons, we may want to ensure that no DMA transfer is
         * currently in progress by emptying the TX and RX queues. */

        /* wait until interrupts have finished executing on other CPUs */
        synchronize_irq(priv->pdev->irq);

        reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
        reg &= ~(ISL38XX_CTRL_STAT_RESET | ISL38XX_CTRL_STAT_RAMBOOT);
        writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
        wmb();
        udelay(ISL38XX_WRITEIO_DELAY);

        reg |= ISL38XX_CTRL_STAT_RESET;
        writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
        wmb();
        udelay(ISL38XX_WRITEIO_DELAY);

        /* clear the Reset bit */
        reg &= ~ISL38XX_CTRL_STAT_RESET;
        writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
        wmb();

        /* wait a while for the device to reset */
        schedule_timeout_uninterruptible(msecs_to_jiffies(50));

        return 0;
}
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
{
        unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
        u8 status;
        int rc;

        for (;;) {
                rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
                                    &status, sizeof(status));
                if (rc)
                        return rc;
                if (!(status & SPI_STATUS_NRDY))
                        return 0;
                if (time_after_eq(jiffies, timeout)) {
                        EFX_ERR(efx, "SPI write timeout on device %d"
                                " last status=0x%02x\n",
                                spi->device_id, status);
                        return -ETIMEDOUT;
                }
                schedule_timeout_uninterruptible(1);
        }
}
static void tenxpress_phy_fini(struct efx_nic *efx)
{
        int reg;

        if (efx->phy_type == PHY_TYPE_SFT9001B)
                device_remove_file(&efx->pci_dev->dev,
                                   &dev_attr_phy_short_reach);

        if (efx->phy_type == PHY_TYPE_SFX7101) {
                /* Power down the LNPGA */
                reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
                mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
                                    PMA_PMD_XCONTROL_REG, reg);

                /* Waiting here ensures that the board fini, which can turn
                 * off the power to the PHY, won't get run until the LNPGA
                 * powerdown has been given long enough to complete. */
                schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
        }

        kfree(efx->phy_data);
        efx->phy_data = NULL;
}
static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
{
        int retries = WM8350_SET_ALM_RETRIES;
        u16 rtc_ctrl;
        int ret;

        /* Set RTC_ALMSET to stop the alarm */
        ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
                              WM8350_RTC_ALMSET);
        if (ret < 0)
                return ret;

        /* Wait until confirmation of stopping */
        do {
                rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
                schedule_timeout_uninterruptible(msecs_to_jiffies(1));
        } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));

        if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
                return -ETIMEDOUT;

        return 0;
}
/*
 * This is the callback kernel thread.
 */
static int nfs_callback_svc(void *vrqstp)
{
        int err, preverr = 0;
        struct svc_rqst *rqstp = vrqstp;

        set_freezable();

        /*
         * FIXME: do we really need to run this under the BKL? If so, please
         * add a comment about what it's intended to protect.
         */
        lock_kernel();
        while (!kthread_should_stop()) {
                /*
                 * Listen for a request on the socket
                 */
                err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
                if (err == -EAGAIN || err == -EINTR) {
                        preverr = err;
                        continue;
                }
                if (err < 0) {
                        if (err != preverr) {
                                printk(KERN_WARNING "%s: unexpected error "
                                       "from svc_recv (%d)\n", __func__, err);
                                preverr = err;
                        }
                        schedule_timeout_uninterruptible(HZ);
                        continue;
                }
                preverr = err;
                svc_process(rqstp);
        }
        unlock_kernel();
        return 0;
}
static void ipmi_unregister_watchdog(int ipmi_intf)
{
        int rv;

        if (!watchdog_user)
                goto out;

        if (watchdog_ifnum != ipmi_intf)
                goto out;

        /* Make sure no one can call us any more. */
        misc_deregister(&ipmi_wdog_miscdev);

        /*
         * Wait to make sure the message makes it out. The lower layer has
         * pointers to our buffers, we want to make sure they are done before
         * we release our memory.
         */
        while (atomic_read(&set_timeout_tofree))
                schedule_timeout_uninterruptible(1);

        /* Disconnect from IPMI. */
        rv = ipmi_destroy_user(watchdog_user);
        if (rv)
                printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n", rv);
        watchdog_user = NULL;

out:
        return;
}
static bool mfc_reset(void)
{
        unsigned int mc_status;
        unsigned long timeo = jiffies;

        timeo += msecs_to_jiffies(MC_STATUS_TIMEOUT);

        /* Stop procedure */
        /* FIXME: F/W can access an invalid address */
        /* Reset VI */
        /* write_reg(0x3F7, MFC_SW_RESET); */
        write_reg(0x3F6, MFC_SW_RESET); /* Reset RISC */
        write_reg(0x3E2, MFC_SW_RESET); /* All reset except for MC */
        mdelay(10);

        /* Check MC status */
        do {
                mc_status = (read_reg(MFC_MC_STATUS) & 0x3);
                if (mc_status == 0)
                        break;
                schedule_timeout_uninterruptible(1); /* FIXME: cpu_relax() */
        } while (time_before(jiffies, timeo));

        if (mc_status != 0)
                return false;

        write_reg(0x0, MFC_SW_RESET);
        write_reg(0x3FE, MFC_SW_RESET);

        return true;
}
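/*
 * A consolidating sketch, not taken from any of the drivers above: nearly
 * every snippet in this collection repeats the same idiom -- poll a
 * condition from process context, sleep one tick between reads, and give
 * up at a jiffies deadline. The helper name and callback signature below
 * are assumptions for illustration only.
 */
static int sleep_poll_timeout(bool (*done)(void *data), void *data,
                              unsigned int timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

        for (;;) {
                if (done(data))
                        return 0; /* condition met */
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT; /* budget exhausted */
                /* sleep for one tick; not interruptible by signals */
                schedule_timeout_uninterruptible(1);
        }
}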