/*
 * Poll the SPI bus until a byte other than @byte shows up, or @timeout
 * jiffies elapse.  Returns the first differing byte value, a negative
 * errno from the read path, or -ETIMEDOUT.
 */
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *status_buf = host->data->status;
	unsigned long poll_start = jiffies;

	for (;;) {
		unsigned idx;
		int err = mmc_spi_readbytes(host, n);

		if (err < 0)
			return err;

		for (idx = 0; idx < n; idx++) {
			if (status_buf[idx] != byte)
				return status_buf[idx];
		}

		if (time_is_before_jiffies(poll_start + timeout))
			return -ETIMEDOUT;

		/* If we need long timeouts, we may release the CPU.
		 * We use jiffies here because we want to have a relation
		 * between elapsed time and the blocking of the scheduler.
		 */
		if (time_is_before_jiffies(poll_start + 1))
			schedule();
	}
}
/*
 * gpu_i2c_check_status - wait for the current I2C master cycle to finish
 *
 * Polls I2C_MST_CNTL for up to one second and then translates the
 * hardware status field into 0 / -ENXIO / -ETIMEDOUT.
 *
 * NOTE(review): if the cycle completes on the very last poll iteration,
 * the time_is_before_jiffies(target) test below can already be true and
 * the function reports -ETIMEDOUT even though the hardware finished —
 * confirm whether this near-deadline race matters to callers.
 */
static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
{
	unsigned long target = jiffies + msecs_to_jiffies(1000);
	u32 val;

	do {
		val = readl(i2cd->regs + I2C_MST_CNTL);
		/* cycle-trigger bit deasserts when the transfer is done */
		if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
			break;
		/* any status other than BUS_BUSY is a final result */
		if ((val & I2C_MST_CNTL_STATUS) !=
		    I2C_MST_CNTL_STATUS_BUS_BUSY)
			break;
		usleep_range(500, 600);
	} while (time_is_after_jiffies(target));

	if (time_is_before_jiffies(target)) {
		dev_err(i2cd->dev, "i2c timeout error %x\n", val);
		return -ETIMEDOUT;
	}

	/* re-read and decode the final status */
	val = readl(i2cd->regs + I2C_MST_CNTL);
	switch (val & I2C_MST_CNTL_STATUS) {
	case I2C_MST_CNTL_STATUS_OKAY:
		return 0;
	case I2C_MST_CNTL_STATUS_NO_ACK:
		/* no slave acknowledged the address */
		return -ENXIO;
	case I2C_MST_CNTL_STATUS_TIMEOUT:
		return -ETIMEDOUT;
	default:
		return 0;
	}
}
/** * igb_ptp_tx_work * @work: pointer to work struct * * This work function polls the TSYNCTXCTL valid bit to determine when a * timestamp has been taken for the current stored skb. **/ void igb_ptp_tx_work(struct work_struct *work) { struct igb_adapter *adapter = container_of(work, struct igb_adapter, ptp_tx_work); struct e1000_hw *hw = &adapter->hw; u32 tsynctxctl; if (!adapter->ptp_tx_skb) return; if (time_is_before_jiffies(adapter->ptp_tx_start + IGB_PTP_TX_TIMEOUT)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; adapter->tx_hwtstamp_timeouts++; dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); return; } tsynctxctl = rd32(E1000_TSYNCTXCTL); if (tsynctxctl & E1000_TSYNCTXCTL_VALID) igb_ptp_tx_hwtstamp(adapter); else /* reschedule to check later */ schedule_work(&adapter->ptp_tx_work); }
/* * __ratelimit - rate limiting * @rs: ratelimit_state data * * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks * in every @rs->ratelimit_jiffies */ int __ratelimit(struct ratelimit_state *rs) { unsigned long flags; if (!rs->interval) return 1; spin_lock_irqsave(&ratelimit_lock, flags); if (!rs->begin) rs->begin = jiffies; if (time_is_before_jiffies(rs->begin + rs->interval)) { if (rs->missed) printk(KERN_WARNING "%s: %d callbacks suppressed\n", __func__, rs->missed); rs->begin = 0; rs->printed = 0; rs->missed = 0; } if (rs->burst && rs->burst > rs->printed) goto print; rs->missed++; spin_unlock_irqrestore(&ratelimit_lock, flags); return 0; print: rs->printed++; spin_unlock_irqrestore(&ratelimit_lock, flags); return 1; }
/** * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched * @adapter: private network adapter structure * * This watchdog task is scheduled to detect error case where hardware has * dropped an Rx packet that was timestamped when the ring is full. The * particular error is rare but leaves the device in a state unable to timestamp * any future packets. **/ void igb_ptp_rx_hang(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct igb_ring *rx_ring; u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); unsigned long rx_event; int n; if (hw->mac.type != e1000_82576) return; /* If we don't have a valid timestamp in the registers, just update the * timeout counter and exit */ if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { adapter->last_rx_ptp_check = jiffies; return; } /* Determine the most recent watchdog or rx_timestamp event */ rx_event = adapter->last_rx_ptp_check; for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; if (time_after(rx_ring->last_rx_timestamp, rx_event)) rx_event = rx_ring->last_rx_timestamp; } /* Only need to read the high RXSTMP register to clear the lock */ if (time_is_before_jiffies(rx_event + 5 * HZ)) { rd32(E1000_RXSTMPH); adapter->last_rx_ptp_check = jiffies; adapter->rx_hwtstamp_cleared++; dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); } }
/* * Check the amount of free space and suspend/resume accordingly. */ static int check_free_space(struct bsd_acct_struct *acct) { struct kstatfs sbuf; if (time_is_before_jiffies(acct->needcheck)) goto out; /* May block */ if (vfs_statfs(&acct->file->f_path, &sbuf)) goto out; if (acct->active) { u64 suspend = sbuf.f_blocks * SUSPEND; do_div(suspend, 100); if (sbuf.f_bavail <= suspend) { acct->active = 0; pr_info("Process accounting paused\n"); } } else { u64 resume = sbuf.f_blocks * RESUME; do_div(resume, 100); if (sbuf.f_bavail >= resume) { acct->active = 1; pr_info("Process accounting resumed\n"); } } acct->needcheck = jiffies + ACCT_TIMEOUT*HZ; out: return acct->active; }
static int at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct at86rf230_local *lp = hw->priv; struct at86rf230_state_change *ctx = &lp->tx; lp->tx_skb = skb; lp->tx_retry = 0; /* After 5 minutes in PLL and the same frequency we run again the * calibration loops which is recommended by at86rf2xx datasheets. * * The calibration is initiate by a state change from TRX_OFF * to TX_ON, the lp->cal_timeout should be reinit by state_delay * function then to start in the next 5 minutes. */ if (time_is_before_jiffies(lp->cal_timeout)) { lp->is_tx_from_off = true; at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, at86rf230_xmit_start, false); } else { at86rf230_xmit_start(ctx); } return 0; }
/** * ixgbe_ptp_tx_hwtstamp_work * @work: pointer to the work struct * * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware * timestamp has been taken for the current skb. It is necesary, because the * descriptor's "done" bit does not correlate with the timestamp event. */ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, ptp_tx_work); struct ixgbe_hw *hw = &adapter->hw; bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + IXGBE_PTP_TX_TIMEOUT); u32 tsynctxctl; /* we have to have a valid skb */ if (!adapter->ptp_tx_skb) return; if (timeout) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; adapter->tx_hwtstamp_timeouts++; e_warn(drv, "clearing Tx Timestamp hang"); return; } tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) ixgbe_ptp_tx_hwtstamp(adapter); else /* reschedule to keep checking if it's not available yet */ schedule_work(&adapter->ptp_tx_work); }
/** * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched * @adapter: private network adapter structure * * this watchdog task is scheduled to detect error case where hardware has * dropped an Rx packet that was timestamped when the ring is full. The * particular error is rare but leaves the device in a state unable to timestamp * any future packets. */ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring *rx_ring; u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); unsigned long rx_event; int n; /* if we don't have a valid timestamp in the registers, just update the * timeout counter and exit */ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) { adapter->last_rx_ptp_check = jiffies; return; } /* determine the most recent watchdog or rx_timestamp event */ rx_event = adapter->last_rx_ptp_check; for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; if (time_after(rx_ring->last_rx_timestamp, rx_event)) rx_event = rx_ring->last_rx_timestamp; } /* only need to read the high RXSTMP register to clear the lock */ if (time_is_before_jiffies(rx_event + 5*HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; adapter->rx_hwtstamp_cleared++; e_warn(drv, "clearing RX Timestamp hang"); } }
static void ctrl_write_callback(struct urb *urb) { #ifdef HTC_DEBUG_QMI_STUCK struct ctrl_write_context *context = urb->context; struct rmnet_ctrl_dev *dev = context->dev; #else struct rmnet_ctrl_dev *dev = urb->context; #endif #ifdef HTC_DEBUG_QMI_STUCK del_timer(&context->timer); if (unlikely(time_is_before_jiffies(context->start_jiffies + HZ))) pr_err("[%s] urb %p takes %d msec to complete.\n", __func__, urb, jiffies_to_msecs(jiffies - context->start_jiffies)); #endif if (urb->status) { dev->tx_ctrl_err_cnt++; pr_debug_ratelimited("Write status/size %d/%d\n", urb->status, urb->actual_length); } #ifdef HTC_LOG_RMNET_USB_CTRL log_rmnet_usb_ctrl_event(dev->intf, "Tx cb", urb->actual_length); #endif kfree(urb->setup_packet); kfree(urb->transfer_buffer); usb_free_urb(urb); usb_autopm_put_interface_async(dev->intf); #ifdef HTC_DEBUG_QMI_STUCK kfree(context); #endif }
/** * i40e_ptp_tx_hang - Detect error case when Tx timestamp register is hung * @pf: The PF private data structure * * This watchdog task is run periodically to make sure that we clear the Tx * timestamp logic if we don't obtain a timestamp in a reasonable amount of * time. It is unexpected in the normal case but if it occurs it results in * permanently preventing timestamps of future packets. **/ void i40e_ptp_tx_hang(struct i40e_pf *pf) { struct sk_buff *skb; if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) return; /* Nothing to do if we're not already waiting for a timestamp */ if (!test_bit(__I40E_PTP_TX_IN_PROGRESS, pf->state)) return; /* We already have a handler routine which is run when we are notified * of a Tx timestamp in the hardware. If we don't get an interrupt * within a second it is reasonable to assume that we never will. */ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) { skb = pf->ptp_tx_skb; pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); /* Free the skb after we clear the bitlock */ dev_kfree_skb_any(skb); pf->tx_hwtstamp_timeouts++; } }
/*
 * mxcmci_pio_data_transfer - PIO transfer of the current MMC data request
 *
 * NOTE(review): this function is truncated in the current view — the write
 * path following the final "else" is not visible here.
 */
static int mxcmci_pio_data_transfer(struct mxcmci_priv *priv)
{
	struct mmc_data *data = priv->data;
	unsigned long *buf;
	u8 *buf8;
	int no_of_bytes;
	int no_of_words;
	unsigned long timeout_jiffies;
	int i;
	u32 temp_data;
	long timeout;

	/* word-wide view of the first scatterlist segment, plus a byte
	 * cursor for the trailing partial word
	 */
	buf = (unsigned long *)(sg_virt(data->sg));
	buf8 = (u8 *)buf;

	/* calculate the number of bytes and words requested for transfer */
	no_of_bytes = data->blocks * data->blksz;
	no_of_words = (no_of_bytes + 3) / 4;
	dev_dbg(priv->host->parent, "no_of_words = %d\n", no_of_words);

	if (data->flags & MMC_DATA_READ) {
		/* allow up to one second for each FIFO word to become ready */
		timeout_jiffies = jiffies + msecs_to_jiffies(1000);
		for (i = 0; i < no_of_words; i++) {
			/* busy-wait until the read FIFO has data or the
			 * whole read operation reports done
			 */
			while (1) {
				if (__raw_readl(priv->base + MMC_STATUS) &
				    (STATUS_BUF_READ_RDY |
				     STATUS_READ_OP_DONE))
					break;
				if (time_is_before_jiffies(timeout_jiffies)) {
					dev_err(priv->host->parent,
						"wait read ready timeout\n");
					data->error = -ETIMEDOUT;
					break;
				}
			}
			/* NOTE(review): on timeout the FIFO is still read
			 * below and the outer loop continues — confirm this
			 * is intentional (it keeps buffer indices in sync).
			 */
			temp_data = __raw_readl(priv->base +
						MMC_BUFFER_ACCESS);
			if (no_of_bytes >= 4) {
				*buf++ = temp_data;
				no_of_bytes -= 4;
			} else {
				/* spill the final partial word a byte at a
				 * time (little-endian order)
				 */
				do {
					*buf8++ = temp_data;
					temp_data = temp_data >> 8;
				} while (--no_of_bytes);
			}
		}
		if (!data->error) {
			/* wait for the controller's read-op-done interrupt */
			mxcmci_interrupt_enable(priv, INT_CNTR_READ_OP_DONE);
			timeout = wait_for_completion_timeout(
					&priv->comp_read_op_done,
					msecs_to_jiffies(1000));
			if (timeout == 0) {
				dev_err(priv->host->parent,
					"wait read_op_done timeout\n");
				data->error = -ETIMEDOUT;
			}
		}
	} else {
static int sdhci_bcm_kona_sd_reset(struct sdhci_host *host) { unsigned int val; unsigned long timeout; /* This timeout should be sufficent for core to reset */ timeout = jiffies + msecs_to_jiffies(100); /* reset the host using the top level reset */ val = sdhci_readl(host, KONA_SDHOST_CORECTRL); val |= KONA_SDHOST_RESET; sdhci_writel(host, val, KONA_SDHOST_CORECTRL); while (!(sdhci_readl(host, KONA_SDHOST_CORECTRL) & KONA_SDHOST_RESET)) { if (time_is_before_jiffies(timeout)) { pr_err("Error: sd host is stuck in reset!!!\n"); return -EFAULT; } } /* bring the host out of reset */ val = sdhci_readl(host, KONA_SDHOST_CORECTRL); val &= ~KONA_SDHOST_RESET; /* * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS) * Back-to-Back writes to same register needs delay when SD bus clock * is very low w.r.t AHB clock, mainly during boot-time and during card * insert-removal. */ usleep_range(1000, 5000); sdhci_writel(host, val, KONA_SDHOST_CORECTRL); return 0; }
/*
 * Write @value to PHY register @regnum of the PHY at @mii_id via the
 * EMAC MDIO interface.  Returns 0 on success or -ETIMEDOUT if the MDIO
 * unit stays busy past MDIO_TIMEOUT.
 */
static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			    u16 value)
{
	struct sun4i_mdio_data *data = bus->priv;
	unsigned long deadline;

	/* issue the phy address and reg */
	writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
	/* pull up the phy io line */
	writel(0x1, data->membase + EMAC_MAC_MCMD_REG);

	/* wait for the MDIO unit to become idle */
	deadline = jiffies + MDIO_TIMEOUT;
	do {
		if (!(readl(data->membase + EMAC_MAC_MIND_REG) & 0x1))
			break;
		if (time_is_before_jiffies(deadline))
			return -ETIMEDOUT;
		msleep(1);
	} while (1);

	/* push down the phy io line */
	writel(0x0, data->membase + EMAC_MAC_MCMD_REG);
	/* and write data */
	writel(value, data->membase + EMAC_MAC_MWTD_REG);

	return 0;
}
/* * Check the amount of free space and suspend/resume accordingly. */ static int check_free_space(struct bsd_acct_struct *acct, struct file *file) { struct kstatfs sbuf; int res; int act; u64 resume; u64 suspend; spin_lock(&acct_lock); res = acct->active; if (!file || time_is_before_jiffies(acct->needcheck)) goto out; spin_unlock(&acct_lock); /* May block */ if (vfs_statfs(&file->f_path, &sbuf)) return res; suspend = sbuf.f_blocks * SUSPEND; resume = sbuf.f_blocks * RESUME; do_div(suspend, 100); do_div(resume, 100); if (sbuf.f_bavail <= suspend) act = -1; else if (sbuf.f_bavail >= resume) act = 1; else act = 0; /* * If some joker switched acct->file under us we'ld better be * silent and _not_ touch anything. */ spin_lock(&acct_lock); if (file != acct->file) { if (act) res = act > 0; goto out; } if (acct->active) { if (act < 0) { acct->active = 0; pr_info("Process accounting paused\n"); } } else { if (act > 0) { acct->active = 1; pr_info("Process accounting resumed\n"); } } acct->needcheck = jiffies + ACCT_TIMEOUT*HZ; res = acct->active; out: spin_unlock(&acct_lock); return res; }
/*
 * ___ratelimit - rate limiting core
 * @rs:   ratelimit_state data
 * @func: name of the calling function, used when reporting suppression
 *
 * Returns 1 when the caller may proceed (e.g. actually print), 0 when
 * the call should be suppressed.
 */
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
	unsigned long flags;
	int ret;

	/* An interval of 0 means "no limiting": always proceed. */
	if (!rs->interval)
		return 1;

	/*
	 * If we contend on this state's lock then almost
	 * by definition we are too busy to print a message,
	 * in addition to the one that will be printed by
	 * the entity that is holding the lock already:
	 */
	if (!raw_spin_trylock_irqsave(&rs->lock, flags))
		return 0;

	/* Open a new window: record the current jiffies on first use. */
	if (!rs->begin)
		rs->begin = jiffies;

	/*
	 * Once the interval has elapsed, settle the window: report how
	 * many callbacks were suppressed (tagged with the caller's
	 * function name) and reset the counters.
	 */
	if (time_is_before_jiffies(rs->begin + rs->interval)) {
		if (rs->missed)
			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
				func, rs->missed);
		rs->begin = 0;
		rs->printed = 0;
		rs->missed = 0;
	}

	/*
	 * Within the burst budget the call counts as printed; otherwise
	 * it counts as missed.
	 */
	if (rs->burst && rs->burst > rs->printed) {
		rs->printed++;
		ret = 1;
	} else {
		rs->missed++;
		ret = 0;
	}
	raw_spin_unlock_irqrestore(&rs->lock, flags);

	return ret;
}
/** * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung * @vsi: The VSI with the rings relevant to 1588 * * This watchdog task is scheduled to detect error case where hardware has * dropped an Rx packet that was timestamped when the ring is full. The * particular error is rare but leaves the device in a state unable to timestamp * any future packets. **/ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_ring *rx_ring; unsigned long rx_event; u32 prttsyn_stat; int n; /* Since we cannot turn off the Rx timestamp logic if the device is * configured for Tx timestamping, we check if Rx timestamping is * configured. We don't want to spuriously warn about Rx timestamp * hangs if we don't care about the timestamps. */ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) return; prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); /* Unless all four receive timestamp registers are latched, we are not * concerned about a possible PTP Rx hang, so just update the timeout * counter and exit. */ if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK << I40E_PRTTSYN_STAT_1_RXT0_SHIFT) | (I40E_PRTTSYN_STAT_1_RXT1_MASK << I40E_PRTTSYN_STAT_1_RXT1_SHIFT) | (I40E_PRTTSYN_STAT_1_RXT2_MASK << I40E_PRTTSYN_STAT_1_RXT2_SHIFT) | (I40E_PRTTSYN_STAT_1_RXT3_MASK << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) { pf->last_rx_ptp_check = jiffies; return; } /* Determine the most recent watchdog or rx_timestamp event. 
*/ rx_event = pf->last_rx_ptp_check; for (n = 0; n < vsi->num_queue_pairs; n++) { rx_ring = vsi->rx_rings[n]; if (time_after(rx_ring->last_rx_timestamp, rx_event)) rx_event = rx_ring->last_rx_timestamp; } /* Only need to read the high RXSTMP register to clear the lock */ if (time_is_before_jiffies(rx_event + 5 * HZ)) { rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); pf->last_rx_ptp_check = jiffies; pf->rx_hwtstamp_cleared++; dev_warn(&vsi->back->pdev->dev, "%s: clearing Rx timestamp hang\n", __func__); } }
/*
 * Periodically read the timecounter so the underlying cycle counter is
 * observed at least once per overflow_period and wraps are not missed.
 */
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	unsigned long next_check = mdev->last_overflow_check +
				   mdev->overflow_period;

	if (!time_is_before_jiffies(next_check))
		return;

	timecounter_read(&mdev->clock);
	mdev->last_overflow_check = jiffies;
}
/** * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow * @adapter: private adapter struct * * this watchdog task periodically reads the timecounter * in order to prevent missing when the system time registers wrap * around. This needs to be run approximately twice a minute. */ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + IXGBE_OVERFLOW_PERIOD); struct timespec ts; if (timeout) { ixgbe_ptp_gettime_82599(&adapter->ptp_caps, &ts); adapter->last_overflow_check = jiffies; } }
void mlx5e_ptp_overflow_check(struct mlx5e_priv *priv) { bool timeout = time_is_before_jiffies(priv->tstamp.last_overflow_check + priv->tstamp.overflow_period); unsigned long flags; if (timeout) { write_lock_irqsave(&priv->tstamp.lock, flags); timecounter_read(&priv->tstamp.clock); write_unlock_irqrestore(&priv->tstamp.lock, flags); priv->tstamp.last_overflow_check = jiffies; } }
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) { bool timeout = time_is_before_jiffies(mdev->last_overflow_check + mdev->overflow_period); unsigned long flags; if (timeout) { write_lock_irqsave(&mdev->clock_lock, flags); timecounter_read(&mdev->clock); write_unlock_irqrestore(&mdev->clock_lock, flags); mdev->last_overflow_check = jiffies; } }
/**
 * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
 * @pf: The PF private data structure
 *
 * This watchdog task is scheduled to detect error case where hardware has
 * dropped an Rx packet that was timestamped when the ring is full. The
 * particular error is rare but leaves the device in a state unable to timestamp
 * any future packets.
 **/
void i40e_ptp_rx_hang(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	unsigned int i, cleared = 0;

	/* Since we cannot turn off the Rx timestamp logic if the device is
	 * configured for Tx timestamping, we check if Rx timestamping is
	 * configured. We don't want to spuriously warn about Rx timestamp
	 * hangs if we don't care about the timestamps.
	 */
	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
		return;

	spin_lock_bh(&pf->ptp_rx_lock);

	/* Update current latch times for Rx events */
	i40e_ptp_get_rx_events(pf);

	/* Check all the currently latched Rx events and see whether they have
	 * been latched for over a second. It is assumed that any timestamp
	 * should have been cleared within this time, or else it was captured
	 * for a dropped frame that the driver never received. Thus, we will
	 * clear any timestamp that has been latched for over 1 second.
	 */
	for (i = 0; i < 4; i++) {
		if ((pf->latch_event_flags & BIT(i)) &&
		    time_is_before_jiffies(pf->latch_events[i] + HZ)) {
			/* reading the high register drops the latch */
			rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
			pf->latch_event_flags &= ~BIT(i);
			cleared++;
		}
	}

	spin_unlock_bh(&pf->ptp_rx_lock);

	/* Log a warning if more than 2 timestamps got dropped in the same
	 * check. We don't want to warn about all drops because it can occur
	 * in normal scenarios such as PTP frames on multicast addresses we
	 * aren't listening to. However, administrator should know if this is
	 * the reason packets aren't receiving timestamps.
	 */
	if (cleared > 2)
		dev_dbg(&pf->pdev->dev,
			"Dropped %d missed RXTIME timestamp events\n",
			cleared);

	/* Finally, update the rx_hwtstamp_cleared counter */
	pf->rx_hwtstamp_cleared += cleared;
}
/*
 * bcm_kona_sd_reset - reset the SDHCI core and the Kona top-level reset
 *
 * Performs a standard SDHCI software reset (bounded to ~100ms), then
 * cycles the Kona-specific top-level reset bit.
 *
 * NOTE(review): the second poll loop (waiting for KONA_SDHOST_RESET to
 * read back as set) is only bounded when CONFIG_ARCH_CAPRI is defined;
 * without it the loop can spin forever — confirm whether that is
 * acceptable on the other platforms this driver supports.
 */
static int bcm_kona_sd_reset(struct sdio_dev *dev)
{
	struct sdhci_host *host = dev->host;
	unsigned int val;
#ifdef CONFIG_ARCH_CAPRI
	unsigned int tries = 10000;
#endif
	unsigned long timeout;

	/* Reset host controller by setting 'Software Reset for All' */
	sdhci_writeb(host, SDHCI_RESET_ALL, SDHCI_SOFTWARE_RESET);

	/* Wait for 100 ms max (100ms timeout is taken from sdhci.c) */
	timeout = jiffies + msecs_to_jiffies(100);

	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL) {
		if (time_is_before_jiffies(timeout)) {
			dev_err(dev->dev, "Error: sd host is in reset!!!\n");
			return -EFAULT;
		}
	}

	/* reset the host using the top level reset */
	val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
	val |= KONA_SDHOST_RESET;
	sdhci_writel(host, val, KONA_SDHOST_CORECTRL);

	/* wait for the reset bit to read back as asserted */
	do {
		val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
#ifdef CONFIG_ARCH_CAPRI
		if (--tries <= 0)
			break;
#endif
	} while (0 == (val & KONA_SDHOST_RESET));

	/* bring the host out of reset */
	val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
	val &= ~KONA_SDHOST_RESET;

	/* Back-to-Back register write needs a delay of 1ms
	 * at bootup (min 10uS)
	 */
	udelay(1000);
	sdhci_writel(host, val, KONA_SDHOST_CORECTRL);

	return 0;
}
/*
 * zd_tx_timeout - check whether any submitted Tx skb has timed out
 *
 * Walks the submitted skb queue (under its lock) and reports whether the
 * oldest entry has been pending longer than ZD_TX_TIMEOUT.  The transmit
 * start time is stashed in rate_driver_data[1] by the submit path.
 *
 * NOTE(review): this function is truncated in the current view — the
 * spin_unlock_irqrestore() and the rest of the body are not visible here.
 */
static bool zd_tx_timeout(struct zd_usb *usb)
{
	struct zd_usb_tx *tx = &usb->tx;
	struct sk_buff_head *q = &tx->submitted_skbs;
	struct sk_buff *skb, *skbnext;
	struct ieee80211_tx_info *info;
	unsigned long flags, trans_start;
	bool have_timedout = false;

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_walk_safe(q, skb, skbnext) {
		info = IEEE80211_SKB_CB(skb);
		/* submit time was stored in the driver-private area */
		trans_start = (unsigned long)info->rate_driver_data[1];
		if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
			have_timedout = true;
			break;
		}
	}
/*
 * __ratelimit - rate limiting
 * @rs: ratelimit_state data
 * @func: name of calling function
 *
 * This enforces a rate limit: not more than @rs->burst callbacks
 * in every @rs->interval
 *
 * RETURNS:
 * 0 means callbacks will be suppressed.
 * 1 means go ahead and do it.
 */
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
	unsigned long flags;
	int ret;

	/* interval == 0 disables rate limiting entirely */
	if (!rs->interval)
		return 1;

	/*
	 * If we contend on this state's lock then almost
	 * by definition we are too busy to print a message,
	 * in addition to the one that will be printed by
	 * the entity that is holding the lock already:
	 */
	if (!spin_trylock_irqsave(&rs->lock, flags))
		return 0;

	/* open a new window on first use */
	if (!rs->begin)
		rs->begin = jiffies;

	/* window expired: report suppressed calls and reset counters.
	 * Note the bare ';' in the !CONFIG_DEBUG_PRINTK branch keeps the
	 * preceding "if (rs->missed)" well-formed — do not remove it.
	 */
	if (time_is_before_jiffies(rs->begin + rs->interval)) {
		if (rs->missed)
#ifdef CONFIG_DEBUG_PRINTK
			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
				func, rs->missed);
#else
			;
#endif
		rs->begin = 0;
		rs->printed = 0;
		rs->missed = 0;
	}

	/* within the burst budget: allow; otherwise count a miss */
	if (rs->burst && rs->burst > rs->printed) {
		rs->printed++;
		ret = 1;
	} else {
		rs->missed++;
		ret = 0;
	}
	spin_unlock_irqrestore(&rs->lock, flags);

	return ret;
}
/** * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung * @vsi: The VSI with the rings relevant to 1588 * * This watchdog task is scheduled to detect error case where hardware has * dropped an Rx packet that was timestamped when the ring is full. The * particular error is rare but leaves the device in a state unable to timestamp * any future packets. **/ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int i; /* Since we cannot turn off the Rx timestamp logic if the device is * configured for Tx timestamping, we check if Rx timestamping is * configured. We don't want to spuriously warn about Rx timestamp * hangs if we don't care about the timestamps. */ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) return; spin_lock_bh(&pf->ptp_rx_lock); /* Update current latch times for Rx events */ i40e_ptp_get_rx_events(pf); /* Check all the currently latched Rx events and see whether they have * been latched for over a second. It is assumed that any timestamp * should have been cleared within this time, or else it was captured * for a dropped frame that the driver never received. Thus, we will * clear any timestamp that has been latched for over 1 second. */ for (i = 0; i < 4; i++) { if ((pf->latch_event_flags & BIT(i)) && time_is_before_jiffies(pf->latch_events[i] + HZ)) { rd32(hw, I40E_PRTTSYN_RXTIME_H(i)); pf->latch_event_flags &= ~BIT(i); pf->rx_hwtstamp_cleared++; dev_warn(&pf->pdev->dev, "Clearing a missed Rx timestamp event for RXTIME[%d]\n", i); } } spin_unlock_bh(&pf->ptp_rx_lock); }
/* Wait for the SMI unit to be ready for another operation.
 *
 * Two wait strategies: when no error interrupt is available the SMI
 * status is polled with short sleeps; otherwise we block on the
 * smi_busy_wait queue which the interrupt handler wakes.  In both modes
 * one extra "last chance" check of the done flag happens after the
 * timeout before giving up.
 */
static int orion_mdio_wait_ready(struct mii_bus *bus)
{
	struct orion_mdio_dev *dev = bus->priv;
	unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
	unsigned long end = jiffies + timeout;
	int timedout = 0;

	while (1) {
		if (orion_mdio_smi_is_done(dev))
			return 0;
		else if (timedout)
			/* timed out on the previous pass and the unit is
			 * still busy: give up
			 */
			break;

		if (dev->err_interrupt <= 0) {
			/* polling mode */
			usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
				     MVMDIO_SMI_POLL_INTERVAL_MAX);

			if (time_is_before_jiffies(end))
				++timedout;
		} else {
			/* interrupt mode.
			 * wait_event_timeout does not guarantee a delay of at
			 * least one whole jiffie, so timeout must be no less
			 * than two.
			 */
			if (timeout < 2)
				timeout = 2;
			wait_event_timeout(dev->smi_busy_wait,
					   orion_mdio_smi_is_done(dev),
					   timeout);

			++timedout;
		}
	}

	dev_err(bus->parent, "Timeout: SMI busy for too long\n");
	return -ETIMEDOUT;
}
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific get_dev_cma_area() helper
 * function.
 *
 * On -EBUSY from alloc_contig_range() the search restarts past the busy
 * range; once the whole bitmap has been scanned the search wraps to the
 * start and keeps retrying for up to ~500ms before giving up.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	unsigned long retry_timeout, retry_cnt;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	/* overall retry budget for -EBUSY wraparounds */
	retry_timeout = jiffies + msecs_to_jiffies(500);
	retry_cnt = 0;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			/* No free range from 'start' on.  Give up when the
			 * scan began at 0 (nothing free at all), or when we
			 * have already wrapped at least once and the retry
			 * budget is exhausted; otherwise wrap to the start
			 * and retry.
			 */
			if (start == 0 ||
			    (time_is_before_jiffies(retry_timeout) &&
			     retry_cnt != 0))
				break;
			cond_resched();
			retry_cnt++;
			start = 0;
			continue;
		}
		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			adjust_managed_cma_page_count(page_zone(page), -count);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
/*
 * xpc_do_exit - orderly shutdown of XPC
 * @reason: why XPC is going away (passed on to partition deactivation)
 *
 * Stops the heartbeat-checker and discovery threads, deactivates every
 * remote partition and waits (with periodic progress messages) for them
 * all to disengage before tearing down XPC's data structures.
 */
static void xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			/* track the longest remaining disengage deadline */
			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_any_partition_engaged()) {
			/* print a progress message at most once per interval */
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_any_partition_engaged());
	DBUG_ON(xpc_any_hbs_allowed() != 0);

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}
/*
 * xpc_do_exit - orderly shutdown of XPC (arch-ops variant)
 * @reason: why XPC is going away (passed on to partition deactivation)
 *
 * Stops the heartbeat-checker and discovery threads, deactivates every
 * remote partition and waits (with periodic progress messages) for them
 * all to disengage before tearing down XPC's data structures.
 */
static void xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/* tell the heartbeat checker and discovery threads to exit */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */
	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			/* track the longest remaining disengage deadline */
			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_arch_ops.any_partition_engaged()) {
			/* print a progress message at most once per interval */
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_arch_ops.any_partition_engaged());

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}