static void mdp_vsync_handler(void *data)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;

	if (vsync_clk_status == 0) {
		pr_debug("Warning: vsync clk is disabled\n");
		mfd->vsync_handler_pending = FALSE;
		return;
	}

	if (mfd->use_mdp_vsync) {
#ifdef MDP_HW_VSYNC
		if (mfd->panel_power_on) {
			MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_0, vsync_load_cnt);

#ifdef CONFIG_FB_MSM_MDP40
			if (mdp_hw_revision < MDP4_REVISION_V2_1)
				MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_1,
					 vsync_load_cnt);
#endif
		}
#endif
	} else {
		mfd->last_vsync_timetick = ktime_get_real();
	}

	mfd->vsync_handler_pending = FALSE;
}
int compat_sock_get_timestampns(struct sock *sk,
				struct timespec __user *userstamp)
{
	struct compat_timespec __user *ctv;
	int err;
	struct timespec ts;

	if (COMPAT_USE_64BIT_TIME)
		return sock_get_timestampns(sk, userstamp);

	ctv = (struct compat_timespec __user *) userstamp;
	err = -ENOENT;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return err;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	err = 0;
	if (put_user(ts.tv_sec, &ctv->tv_sec) ||
	    put_user(ts.tv_nsec, &ctv->tv_nsec))
		err = -EFAULT;
	return err;
}
/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Start timer on a wakeup packet */
	if (p->active_countdown == 0) {
		ret = 1;
		now = ktime_get_real();
		p->last_packet = now;
		if (in_suspend)
			queue_delayed_work(rmnet_wq, &p->work,
					   usecs_to_jiffies(p->timeout_us));
		else
			queue_delayed_work(rmnet_wq, &p->work,
					   usecs_to_jiffies(POLL_DELAY));
	}

	if (in_suspend)
		p->active_countdown++;
	else
		p->active_countdown = p->timeout_us / POLL_DELAY;

	return ret;
}
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	u64 temp_mult;

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);

	/*
	 * We have hca_core_clock in MHz, so to translate cycles to nsecs
	 * we need to divide cycles by freq and multiply by 1000;
	 * to get a precise result we shift the value left first,
	 * since we don't have floating point there;
	 * at the end we shift the result back.
	 */
	temp_mult = div_u64(((1ull * 1000) << 29), dev->caps.hca_core_clock);
	mdev->cycles.mult = (u32)temp_mult;
	mdev->cycles.shift = 29;

	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));

	memset(&mdev->compare, 0, sizeof(mdev->compare));
	mdev->compare.source = &mdev->clock;
	mdev->compare.target = ktime_get_real;
	mdev->compare.num_samples = 10;
	timecompare_update(&mdev->compare, 0);
}
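/*
 * Standalone sketch of the mult/shift fixed-point conversion set up above,
 * using a hypothetical 427 MHz hca_core_clock (the frequency is purely
 * illustrative, not taken from real hardware).  One second's worth of
 * cycles should come out as roughly 1e9 ns after mult/shift.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t core_clock_mhz = 427;	/* hypothetical HCA clock, MHz */
	uint32_t shift = 29;
	/* mult = (1000 << shift) / freq_mhz, as computed above */
	uint32_t mult = (uint32_t)(((1ull * 1000) << shift) / core_clock_mhz);

	uint64_t cycles = core_clock_mhz * 1000000;	/* one second of cycles */
	uint64_t ns = (cycles * mult) >> shift;	/* cyclecounter_cyc2ns() core */

	printf("mult=%u, 1 s of cycles -> %llu ns\n",
	       mult, (unsigned long long)ns);
	return 0;
}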
/**
 * ixgbe_ptp_reset
 * @adapter: the ixgbe private board structure
 *
 * When the MAC resets, all of the hardware configuration for timesync is
 * reset. This function should be called to re-enable the device for PTP,
 * using the last known settings. However, we do lose the current clock
 * time, so we fall back to resetting it based on the kernel's realtime
 * clock.
 *
 * This function will maintain the hwtstamp_config settings, and it
 * retriggers the SDP output if it's enabled.
 */
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned long flags;

	/* reset the hardware timestamping mode */
	ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);

	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		ixgbe_ptp_start_cyclecounter(adapter);

		spin_lock_irqsave(&adapter->tmreg_lock, flags);
		timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
				 ktime_to_ns(ktime_get_real()));
		spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

		adapter->last_overflow_check = jiffies;
		break;
	default:
		return;
	}

	/*
	 * Now that the shift has been calculated and the systime
	 * registers reset, (re-)enable the Clock out feature
	 */
	if (adapter->ptp_setup_sdp)
		adapter->ptp_setup_sdp(adapter);
}
/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
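/*
 * Side note, assuming FEC_CC_MULT is (1 << 31) as in mainline fec_ptp:
 * with mult == 2^31 and shift == 31, (cycles * mult) >> shift is the
 * identity, i.e. the ATIME counter above already ticks in nanoseconds
 * and the cyclecounter just passes its value through.  Quick check:
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t mult = 1ull << 31, shift = 31;
	uint64_t cycles = 123456789;	/* any value within the 31-bit mask */

	/* identity conversion: no overflow since 2^31 * 2^31 fits in u64 */
	assert(((cycles * mult) >> shift) == cycles);
	return 0;
}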
enum hrtimer_restart mdp_dma2_vsync_hrtimer_handler(struct hrtimer *ht)
{
	struct msm_fb_data_type *mfd = NULL;

	mfd = container_of(ht, struct msm_fb_data_type, dma_hrtimer);

	mdp_pipe_kickoff(MDP_DMA2_TERM, mfd);

	if (msm_fb_debug_enabled) {
		ktime_t t;
		int usec_diff;
		int actual_wait;

		t = ktime_get_real();

		actual_wait = ktime_to_us(ktime_sub(t, vt));
		usec_diff = actual_wait - mdp_expected_usec_wait;

		if ((mdp_usec_diff_threshold < usec_diff) || (usec_diff < 0))
			MSM_FB_DEBUG
			    ("HRT Diff = %d usec Exp=%d usec Act=%d usec\n",
			     usec_diff, mdp_expected_usec_wait, actual_wait);
	}

	return HRTIMER_NORESTART;
}
static void cros_usbpd_log_check(struct work_struct *work)
{
	struct logger_data *logger = container_of(to_delayed_work(work),
						  struct logger_data,
						  log_work);
	struct device *dev = logger->dev;
	struct ec_response_pd_log *r;
	int entries = 0;
	ktime_t now;

	while (entries++ < CROS_USBPD_MAX_LOG_ENTRIES) {
		r = ec_get_log_entry(logger);
		now = ktime_get_real();
		if (IS_ERR(r)) {
			dev_dbg(dev, "Cannot get PD log %ld\n", PTR_ERR(r));
			break;
		}
		if (r->type == PD_EVENT_NO_ENTRY)
			break;

		cros_usbpd_print_log_entry(r, now);
	}

	queue_delayed_work(logger->log_workqueue, &logger->log_work,
			   CROS_USBPD_LOG_UPDATE_DELAY);
}
static void rk2918_get_bat_capacity(struct rk2918_battery_data *bat)
{
	s32 deltatime = 0;
	ktime_t ktimetmp;
	struct timespec ts;

	ktimetmp = ktime_get_real();
	ts = ktime_to_timespec(ktimetmp);
	deltatime = ts.tv_sec - batteryspendcnt;

	/* first sample after boot, or retrying after a failed load:
	 * use the capacity saved from the previous run */
	if (first_flag || (openfailflag && (openfailcount > 1))) {
		if (first_flag == 1)
			first_flag--;
		openfailcount--;
		printk("%s,first_flag=%d,openfailflag=%d,openfailcount=%d\n",
		       __func__, first_flag, openfailflag, openfailcount);
		gBatCapacity = rk2918_battery_load_capacity();
		if (gBatCapacity == 0)
			gBatCapacity = 1;
	} else if ((deltatime > 1) && (first_flag == 0) && (!time_chg_flag)) {
		/* resumed after more than a second: estimate the capacity
		 * drift over the time spent suspended */
		/* printk("---->time_chg_flag =%d\n", time_chg_flag); */
		gBatCapacity = rk2918_battery_resume_get_Capacity(deltatime);
	} else {
		gBatCapacity = rk2918_get_bat_capacity_ext(gBatVoltage);
	}

	if (time_chg_flag)
		time_chg_flag = 0;

	batteryspendcnt = ts.tv_sec;
}
static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;

	return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
}
static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";

	req = make_mode(req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}
	/*
	 * Submit the actual lock request.
	 */

	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}
/**
 * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
 * @adapter: Board private structure.
 *
 * This function handles the reset work required to re-enable the PTP device.
 **/
void igb_ptp_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;

	if (!(adapter->flags & IGB_FLAG_PTP))
		return;

	/* reset the tstamp_config */
	igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);

	spin_lock_irqsave(&adapter->tmreg_lock, flags);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* Dial the nominal frequency. */
		E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
						   INCVALUE_82576);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
		E1000_WRITE_REG(hw, E1000_TSSDP, 0x0);
		E1000_WRITE_REG(hw, E1000_TSIM, TSYNC_INTERRUPTS);
		E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
		break;
	default:
		/* No work to do. */
		goto out;
	}

	/* Re-initialize the timer. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
		struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());

		igb_ptp_write_i210(adapter, &ts64);
	} else {
		timecounter_init(&adapter->tc, &adapter->cc,
				 ktime_to_ns(ktime_get_real()));
	}
out:
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
}
static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts,
			     struct timecompare *cmp)
{
	ktime_t sys = ktime_get_real();

	pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
		 __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec,
		 ts->tv.nsec, sys.tv.sec, sys.tv.nsec, cmp->offset,
		 cmp->skew);
}
/**
 * i40e_ptp_init - Initialize the 1588 support after device probe or reset
 * @pf: Board private structure
 *
 * This function sets device up for 1588 support. The first time it is run, it
 * will create a PHC clock device. It does not create a clock device if one
 * already exists. It also reconfigures the device after a reset.
 **/
void i40e_ptp_init(struct i40e_pf *pf)
{
	struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
	struct i40e_hw *hw = &pf->hw;
	u32 pf_id;
	long err;

	/* Only one PF is assigned to control 1588 logic per port. Do not
	 * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
	 */
	pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
		I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
	if (hw->pf_id != pf_id) {
		pf->flags &= ~I40E_FLAG_PTP;
		dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
			 __func__, netdev->name);
		return;
	}

	/* we have to initialize the lock first, since we can't control
	 * when the user will enter the PHC device entry points
	 */
	spin_lock_init(&pf->tmreg_lock);

	/* ensure we have a clock device */
	err = i40e_ptp_create_clock(pf);
	if (err) {
		pf->ptp_clock = NULL;
		dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
			__func__);
	} else {
		struct timespec64 ts;
		u32 regval;

		dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
			 netdev->name);
		pf->flags |= I40E_FLAG_PTP;

		/* Ensure the clocks are running. */
		regval = rd32(hw, I40E_PRTTSYN_CTL0);
		regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
		wr32(hw, I40E_PRTTSYN_CTL0, regval);
		regval = rd32(hw, I40E_PRTTSYN_CTL1);
		regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
		wr32(hw, I40E_PRTTSYN_CTL1, regval);

		/* Set the increment value per clock tick. */
		i40e_ptp_set_increment(pf);

		/* reset timestamping mode */
		i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);

		/* Set the clock value. */
		ts = ktime_to_timespec64(ktime_get_real());
		i40e_ptp_settime(&pf->ptp_caps, &ts);
	}
}
static int activity_stats_notifier(struct notifier_block *nb,
				   unsigned long event, void *dummy)
{
	printk("[%s]: begin notify event=%lu\r\n", __FUNCTION__, event);
	switch (event) {
	case PM_SUSPEND_PREPARE:
		suspend_time = ktime_get_real();
		printk("[%s]: end notify 1 event=%lu\r\n", __FUNCTION__,
		       event);
		break;

	case PM_POST_SUSPEND:
		suspend_time = ktime_sub(ktime_get_real(), suspend_time);
		last_transmit = ktime_sub(last_transmit, suspend_time);
	}
	printk("[%s]: end notify event=%lu\r\n", __FUNCTION__, event);
	return 0;
}
static void rxrpc_call_timer_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *)_call;

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE)
		rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
}
static void msi_wmi_notify(u32 value, void *context)
{
	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
	struct key_entry *key;
	union acpi_object *obj;
	acpi_status status;

	status = wmi_get_event_data(value, &response);
	if (status != AE_OK) {
		pr_info("bad event status 0x%x\n", status);
		return;
	}

	obj = (union acpi_object *)response.pointer;

	if (obj && obj->type == ACPI_TYPE_INTEGER) {
		int eventcode = obj->integer.value;
		pr_debug("Eventcode: 0x%x\n", eventcode);
		key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
							eventcode);
		if (!key) {
			pr_info("Unknown key pressed - %x\n", eventcode);
			goto msi_wmi_notify_exit;
		}

		if (event_wmi->quirk_last_pressed) {
			ktime_t cur = ktime_get_real();
			ktime_t diff = ktime_sub(cur, last_pressed);
			/* Ignore event if any event happened in a 50 ms
			   timeframe -> Key press may result in 10-20 GPEs */
			if (ktime_to_us(diff) < 1000 * 50) {
				pr_debug("Suppressed key event 0x%X - "
					 "Last press was %lld us ago\n",
					 key->code, ktime_to_us(diff));
				goto msi_wmi_notify_exit;
			}
			last_pressed = cur;
		}

		if (key->type == KE_KEY &&
		    /* Brightness is served via acpi video driver */
		    (backlight ||
		     (key->code != MSI_KEY_BRIGHTNESSUP &&
		      key->code != MSI_KEY_BRIGHTNESSDOWN))) {
			pr_debug("Send key: 0x%X - Input layer keycode: %d\n",
				 key->code, key->keycode);
			sparse_keymap_report_entry(msi_wmi_input_dev, key, 1,
						   true);
		}
	} else
		pr_info("Unknown event received\n");

msi_wmi_notify_exit:
	kfree(response.pointer);
}
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
{
	*kt_before = ktime_get_real();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}
/*
 * GPIO ISR
 * State machine for reading the sensor request.
 * Hopefully the hardware performs some filtering.
 */
static irqreturn_t read_isr(int irq, void *data)
{
	ktime_t now = ktime_get_real();
	static int bit_count, char_count;

	switch (_read_req) {
	case READ_START:
		if (gpio_get_value(_pin) == 0)
			_read_req = READ_START_HIGH;
		break;
	case READ_START_HIGH:
		if (gpio_get_value(_pin) == 1)
			_read_req = READ_BIT_START;
		break;
	case READ_BIT_START:
		if (gpio_get_value(_pin) == 0) {
			_read_req = READ_BIT_HIGH;
			bit_count = 7;
			char_count = 0;
			memset(_data, 0, sizeof(_data));
		}
		break;
	case READ_BIT_HIGH:
		if (gpio_get_value(_pin) == 1)
			_read_req = READ_BIT_LOW;
		break;
	case READ_BIT_LOW:
		if (gpio_get_value(_pin) == 0) {
			_ulen = ktime_us_delta(now, _old);
			/* a long high pulse (> 40 us) encodes a 1 bit */
			if (_ulen > 40)
				_data[char_count] |= (1 << bit_count);
			if (--bit_count < 0) {
				char_count++;
				bit_count = 7;
			}
			if (char_count == 5) {
				_read_req = READ_STOP;
				wake_up_interruptible(&_queue);
			} else {
				_read_req = READ_BIT_HIGH;
			}
		}
		break;
	case READ_STOP:
	default:
		break;
	}
	_old = now;
	return IRQ_HANDLED;
}
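/*
 * Hedged sketch, not part of the driver above: the 40-bit frame the ISR
 * assembles (5 bytes, MSB first, long high pulse == 1) matches the common
 * DHT-style humidity/temperature sensor layout, where the fifth byte is a
 * checksum over the first four.  Whether this particular sensor uses that
 * layout is an assumption; frame_checksum_ok() is a hypothetical helper.
 */
#include <stdint.h>
#include <stdio.h>

static int frame_checksum_ok(const uint8_t d[5])
{
	/* checksum = low 8 bits of the sum of the four data bytes */
	return (uint8_t)(d[0] + d[1] + d[2] + d[3]) == d[4];
}

int main(void)
{
	/* made-up frame: 0x02 + 0x8c + 0x01 + 0x5f == 0xee */
	const uint8_t frame[5] = { 0x02, 0x8c, 0x01, 0x5f, 0xee };

	printf("checksum %s\n", frame_checksum_ok(frame) ? "ok" : "bad");
	return 0;
}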
int mpodp_clean_rx(struct mpodp_if_priv *priv, struct mpodp_rxq *rxq,
		   int budget)
{
	struct net_device *netdev = priv->netdev;
	struct mpodp_rx *rx;
	int worked = 0;
	ktime_t now = ktime_get_real();

	/* RX: 2nd step: give packet to kernel and update RX head */
	while (budget-- && rxq->used != rxq->avail) {
		if (!mpodp_rx_is_done(priv, rxq, rxq->used)) {
			/* DMA transfer not completed */
			break;
		}

		if (netif_msg_rx_status(priv))
			netdev_info(netdev, "rxq[%d] rx[%d]: transfer done\n",
				    rxq->id, rxq->used);

		/* get rx slot */
		rx = &(rxq->ring[rxq->used]);

		if (rx->len == 0) {
			/* error packet, skip it */
			goto pkt_skip;
		}

		dma_unmap_sg(&priv->pdev->dev, rx->sg, rx->dma_len,
			     DMA_FROM_DEVICE);

		/* fill skb fields */
		skb_put(rx->skb, rx->len);
		skb_record_rx_queue(rx->skb, rxq->id);
		rx->skb->tstamp = now;
		rx->skb->protocol = eth_type_trans(rx->skb, netdev);
		netif_receive_skb(rx->skb);

		/* update stats */
		netdev->stats.rx_bytes += rx->len;
		netdev->stats.rx_packets++;

pkt_skip:
		rxq->used = (rxq->used + 1) % rxq->size;
		worked++;
	}

	/* write new RX head */
	if (worked)
		writel(rxq->used, rxq->head_addr);

	return worked;
}
static void bcmpmu_rpc_program_alarm(struct bcmpmu_rpc *rpc,
				     long seconds)
{
	ktime_t interval = ktime_set(seconds, 0);
	ktime_t next;

	pr_rpc(VERBOSE, "set timeout %ld s.\n", seconds);
	next = ktime_add(ktime_get_real(), interval);
	alarm_start(&rpc->alarm, next);
}
/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	ktime_t now = ktime_get_real(), expire_at;

	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
	call->expire_at = expire_at;
	call->ack_at = expire_at;
	call->ping_at = expire_at;
	call->resend_at = expire_at;
	call->timer.expires = jiffies + LONG_MAX / 2;
	rxrpc_set_timer(call, rxrpc_timer_begin, now);
}
static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,
					       const struct sk_buff *skb,
					       const u64 ndp)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	entry->tfrchrx_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
	entry->tfrchrx_ccval = dh->dccph_ccval;
	entry->tfrchrx_type  = dh->dccph_type;
	entry->tfrchrx_ndp   = ndp;
	entry->tfrchrx_tstamp = ktime_get_real();
}
int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
{
	struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab,
							    gfp_any());

	if (entry == NULL)
		return -ENOBUFS;
	entry->seqno = seqno;
	entry->stamp = ktime_get_real();
	entry->next  = *headp;
	*headp       = entry;
	return 0;
}
static ktime_t ptp_to_ktime(u64 ptptime)
{
	ktime_t ktimebase;
	u64 ptpbase;
	unsigned long flags;

	local_irq_save(flags);
	/* Fill the icache with the code */
	ktime_get_real();
	/* Flush all pending operations */
	mb();
	/* Read the time and PTP clock as close together as
	 * possible. It is important that this sequence take the same
	 * amount of time to reduce jitter */
	ktimebase = ktime_get_real();
	ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
	local_irq_restore(flags);

	return ktime_sub_ns(ktimebase, ptpbase - ptptime);
}
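/*
 * Minimal userspace sketch of the pairing trick above: sample the two
 * clocks back to back once, then map any reading of clock B into clock
 * A's timebase via the offset captured at the paired instant.  The base
 * values here are made up; in the driver they come from ktime_get_real()
 * and the PTP counter read with interrupts off.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend these were sampled back to back with IRQs disabled */
	uint64_t a_base = 1000000000ull;	/* wall clock, ns */
	uint64_t b_base = 500ull;		/* free-running counter, ns */

	/* same form as ptp_to_ktime(): a = a_base - (b_base - b_time);
	 * u64 modular arithmetic makes this correct even when
	 * b_time > b_base, as here */
	uint64_t b_time = 800ull;
	uint64_t a_time = a_base - (b_base - b_time);

	printf("a_time = %llu ns\n", (unsigned long long)a_time); /* 1000000300 */
	return 0;
}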
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tstamp *tstamp = &priv->tstamp;
	u64 ns;
	u64 frac = 0;
	u32 dev_freq;

	mlx5e_timestamp_init_config(tstamp);
	dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
	if (!dev_freq) {
		mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}
	rwlock_init(&tstamp->lock);
	tstamp->cycles.read = mlx5e_read_internal_timer;
	tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
	tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
						   tstamp->cycles.shift);
	tstamp->nominal_c_mult = tstamp->cycles.mult;
	tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
	tstamp->mdev = priv->mdev;

	timecounter_init(&tstamp->clock, &tstamp->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	tstamp->overflow_period = ns;

	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
	if (tstamp->overflow_period)
		schedule_delayed_work(&tstamp->overflow_work, 0);
	else
		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");

	/* Configure the PHC */
	tstamp->ptp_info = mlx5e_ptp_clock_info;
	snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");

	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
					 &priv->mdev->pdev->dev);
	if (IS_ERR(tstamp->ptp)) {
		mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(tstamp->ptp));
		tstamp->ptp = NULL;
	}
}
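/*
 * Worked example for the overflow watchdog above, under an assumed (not
 * vendor-confirmed) 100 MHz internal timer: the 41-bit cycle counter
 * wraps after 2^41 cycles, and the watchdog must observe it at least
 * once per wrap, so the work is scheduled well inside that window.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ull << 41) - 1;	/* CLOCKSOURCE_MASK(41) */
	uint64_t freq_hz = 100000000ull;	/* hypothetical timer rate */
	uint64_t wrap_s = mask / freq_hz;	/* ~21990 s, about 6.1 h */

	printf("counter wraps after ~%llu s; check at least every ~%llu s\n",
	       (unsigned long long)wrap_s, (unsigned long long)(wrap_s / 2));
	return 0;
}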
/**
 * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
 * @adapter: Board private structure.
 *
 * This function handles the reset work required to re-enable the PTP device.
 **/
void igb_ptp_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (!(adapter->flags & IGB_FLAG_PTP))
		return;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* Dial the nominal frequency. */
		E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
						   INCVALUE_82576);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Enable the timer functions and interrupts. */
		E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
		E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
		E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
		break;
	default:
		/* No work to do. */
		return;
	}

	/* Re-initialize the timer. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
		struct timespec ts = ktime_to_timespec(ktime_get_real());

		igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
	} else {
		timecounter_init(&adapter->tc, &adapter->cc,
				 ktime_to_ns(ktime_get_real()));
	}
}
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* kick off the PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;

		/* let's turn on the PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		/* HWIO_OUT(MDP_DISPLAY0_START, 0x1000); */
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_interruptible(&mdp_ppp_comp);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_INFO("MDP-PPP: %d\n",
				    (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_INFO("MDP-DMA2: %d\n",
				    (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on the DMA2 block */
		/* mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE); */
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0); /* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
#endif
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	}
}
static u32 seq_scale(u32 seq)
{
	/*
	 * As close as possible to RFC 793, which
	 * suggests using a 250 kHz clock.
	 * Further reading shows this assumes 2 Mb/s networks.
	 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
	 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
	 * we also need to limit the resolution so that the u32 seq
	 * overlaps less than one time per MSL (2 minutes).
	 * Choosing a clock of 64 ns period is OK. (period of 274 s)
	 */
	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
}
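/*
 * Quick check of the arithmetic in the comment above: shifting
 * nanoseconds right by 6 gives a 64 ns tick, so the 32-bit sequence
 * space wraps after 2^32 * 64 ns, i.e. the quoted ~274 s period.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tick_ns = 1ull << 6;		/* 64 ns per increment */
	uint64_t wrap_ns = tick_ns << 32;	/* full u32 period */

	printf("wrap period = %.1f s\n", wrap_ns / 1e9);	/* ~274.9 */
	return 0;
}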
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev, int index)
{
	ktime_t kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __get_cpu_var(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	local_irq_enable();
	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}