/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
{
    struct illinois *ca = inet_csk_ca(sk);
    u32 rtt;

    ca->acked = pkts_acked;

    if (ktime_equal(last, net_invalid_timestamp()))
        return;

    rtt = ktime_to_us(net_timedelta(last));

    /* ignore bogus values, this prevents wraparound in alpha math */
    if (rtt > RTT_MAX)
        rtt = RTT_MAX;

    /* keep track of minimum RTT seen so far */
    if (ca->base_rtt > rtt)
        ca->base_rtt = rtt;

    /* and max */
    if (ca->max_rtt < rtt)
        ca->max_rtt = rtt;

    ++ca->cnt_rtt;
    ca->sum_rtt += rtt;
}
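Once per window, tcp_illinois.c turns these accumulators into queueing-delay figures that drive its alpha/beta parameters. A minimal sketch of that consumption step, assuming the same struct illinois fields used above (the helper names mirror those in tcp_illinois.c, and sum_rtt is assumed to be u64):

/* Sketch: derive queueing delays from the per-window accumulators
 * filled in by tcp_illinois_acked() above. */
static u32 avg_delay(const struct illinois *ca)
{
    u64 t = ca->sum_rtt;

    do_div(t, ca->cnt_rtt);      /* mean RTT over the last window */
    return t - ca->base_rtt;     /* minus propagation delay */
}

static u32 max_delay(const struct illinois *ca)
{
    return ca->max_rtt - ca->base_rtt;  /* worst queueing delay seen */
}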
static ssize_t vsync_show_event(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    int cndx;
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;
    u64 vsync_tick;
    ktime_t timestamp;

    cndx = 0;
    vctrl = &vsync_ctrl_db[0];

    timestamp = vctrl->vsync_time;

    ret = wait_event_interruptible_timeout(vctrl->wait_queue,
            !ktime_equal(timestamp, vctrl->vsync_time) &&
            vctrl->vsync_irq_enabled,
            msecs_to_jiffies(VSYNC_PERIOD * 4));
    if (ret == -ERESTARTSYS)
        return ret;
    else if (ret == 0)
        vsync_tick = ktime_to_ns(ktime_get());
    else
        vsync_tick = ktime_to_ns(vctrl->vsync_time);

    ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
    /* scnprintf() already NUL-terminates buf */
    return ret;
}
ssize_t mdp4_dsi_video_show_event(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    int cndx;
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;
    u64 vsync_tick;
    ktime_t timestamp;

    cndx = 0;
    vctrl = &vsync_ctrl_db[0];

    timestamp = vctrl->vsync_time;

    sec_debug_mdp_set_value(SEC_DEBUG_VSYNC_SYSFS_EVENT, SEC_DEBUG_IN);

    ret = wait_event_interruptible(vctrl->wait_queue,
            !ktime_equal(timestamp, vctrl->vsync_time) &&
            vctrl->vsync_irq_enabled);
    if (ret == -ERESTARTSYS)
        return ret;

    vsync_tick = ktime_to_ns(vctrl->vsync_time);
    ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
    /* scnprintf() already NUL-terminates buf */

    sec_debug_mdp_set_value(SEC_DEBUG_VSYNC_SYSFS_EVENT, SEC_DEBUG_OUT);
    return ret;
}
void mdp4_dsi_video_wait4vsync(int cndx)
{
    struct vsycn_ctrl *vctrl;
    int ret;
    ktime_t timestamp;
    unsigned long flags;

    if (cndx >= MAX_CONTROLLER) {
        pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
        return;
    }

    vctrl = &vsync_ctrl_db[cndx];

    if (atomic_read(&vctrl->suspend) > 0)
        return;

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    timestamp = vctrl->vsync_time;
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    mdp4_video_vsync_irq_ctrl(cndx, 1);

    ret = wait_event_timeout(vctrl->wait_queue_internal,
            !ktime_equal(timestamp, vctrl->vsync_time),
            msecs_to_jiffies(VSYNC_PERIOD * 8));
    if (ret <= 0)
        pr_err("%s: timeout ret=%d\n", __func__, ret);

    mdp4_video_vsync_irq_ctrl(cndx, 0);
    mdp4_stat.wait4vsync0++;
}
void mdp4_dsi_video_wait4vsync(int cndx)
{
    struct vsycn_ctrl *vctrl;
    struct mdp4_overlay_pipe *pipe;
    int ret;
    ktime_t timestamp;

    if (cndx >= MAX_CONTROLLER) {
        pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
        return;
    }

    vctrl = &vsync_ctrl_db[cndx];
    pipe = vctrl->base_pipe;

    if (atomic_read(&vctrl->suspend) > 0)
        return;

    mdp4_video_vsync_irq_ctrl(cndx, 1);

    timestamp = vctrl->vsync_time;
    ret = wait_event_interruptible_timeout(vctrl->wait_queue,
            !ktime_equal(timestamp, vctrl->vsync_time) &&
            vctrl->vsync_irq_enabled,
            msecs_to_jiffies(VSYNC_PERIOD * 8));
    if (ret <= 0)
        pr_err("%s: timeout ret=%d\n", __func__, ret);

    mdp4_video_vsync_irq_ctrl(cndx, 0);
    mdp4_stat.wait4vsync0++;
}
ssize_t mdp4_dsi_video_show_event(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    int cndx;
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;
    u64 vsync_tick;
    ktime_t timestamp;
    unsigned long flags;

    cndx = 0;
    vctrl = &vsync_ctrl_db[0];

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    timestamp = vctrl->vsync_time;
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    ret = wait_event_interruptible(vctrl->wait_queue,
            !ktime_equal(timestamp, vctrl->vsync_time) &&
            vctrl->vsync_irq_enabled);
    if (ret == -ERESTARTSYS)
        return ret;

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    vsync_tick = ktime_to_ns(vctrl->vsync_time);
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
    /* scnprintf() already NUL-terminates buf */
    return ret;
}
int mxr_reg_wait4update(struct mxr_device *mdev)
{
    ktime_t timestamp = mdev->vsync_timestamp;
    int ret;

    ret = wait_event_interruptible_timeout(mdev->vsync_wait,
            !ktime_equal(timestamp, mdev->vsync_timestamp) &&
            !mxr_update_pending(mdev),
            msecs_to_jiffies(100));
    if (ret > 0)
        return 0;
    if (ret < 0)
        return ret;
    mxr_warn(mdev, "no vsync detected - timeout\n");
    return -ETIME;
}
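All of the vsync waiters above follow one pattern: snapshot the published timestamp, then block until the vsync interrupt handler stores a newer one and wakes the queue. A generic sketch of that pattern with hypothetical names (struct vsync_src, vsync_wait_for_next); the return-value handling mirrors wait_event_interruptible_timeout()'s contract of -ERESTARTSYS on signal, 0 on timeout, and remaining jiffies on success:

/* Sketch of the shared wait-for-newer-timestamp pattern used above.
 * The struct and function names here are hypothetical. */
struct vsync_src {
    wait_queue_head_t wq;  /* woken from the vsync IRQ handler */
    ktime_t ts;            /* updated by the IRQ handler before wake_up() */
};

static int vsync_wait_for_next(struct vsync_src *src, unsigned long tmo_ms)
{
    ktime_t snap = src->ts;  /* snapshot the current timestamp */
    long ret;

    /* Sleep until the IRQ handler publishes a timestamp != snapshot. */
    ret = wait_event_interruptible_timeout(src->wq,
            !ktime_equal(snap, src->ts),
            msecs_to_jiffies(tmo_ms));
    if (ret < 0)
        return ret;       /* interrupted by a signal */
    if (ret == 0)
        return -ETIME;    /* no vsync within the timeout */
    return 0;
}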
/* Do rtt sampling needed for Veno. */
static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
{
    struct veno *veno = inet_csk_ca(sk);
    u32 vrtt;

    if (ktime_equal(last, net_invalid_timestamp()))
        return;

    /* Never allow zero rtt or baseRTT */
    vrtt = ktime_to_us(net_timedelta(last)) + 1;

    /* Filter to find propagation delay: */
    if (vrtt < veno->basertt)
        veno->basertt = vrtt;

    /* Find the min rtt during the last rtt to find
     * the current prop. delay + queuing delay:
     */
    veno->minrtt = min(veno->minrtt, vrtt);

    veno->cntrtt++;
}
/* Do RTT sampling needed for Vegas.
 * Basically we:
 *   o min-filter RTT samples from within an RTT to get the current
 *     propagation delay + queuing delay (we are min-filtering to try to
 *     avoid the effects of delayed ACKs)
 *   o min-filter RTT samples from a much longer window (forever for now)
 *     to find the propagation delay (baseRTT)
 */
void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
{
    struct vegas *vegas = inet_csk_ca(sk);
    u32 vrtt;

    if (ktime_equal(last, net_invalid_timestamp()))
        return;

    /* Never allow zero rtt or baseRTT */
    vrtt = ktime_to_us(net_timedelta(last)) + 1;

    /* Filter to find propagation delay: */
    if (vrtt < vegas->baseRTT)
        vegas->baseRTT = vrtt;

    /* Find the min RTT during the last RTT to find
     * the current prop. delay + queuing delay:
     */
    vegas->minRTT = min(vegas->minRTT, vrtt);
    vegas->cntRTT++;
}
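The samples gathered here are consumed once per RTT by Vegas's congestion-avoidance step, which estimates how many packets the connection keeps queued in the network. A rough sketch of that estimate, assuming the struct vegas fields above (the in-tree logic lives in tcp_vegas_cong_avoid()):

/* Sketch: Vegas's per-RTT backlog estimate, built from the fields
 * filled in by tcp_vegas_pkts_acked() above. Roughly, the result is
 * the number of packets queued in the path; Vegas grows cwnd while
 * it stays below alpha and shrinks cwnd once it exceeds beta. */
static u32 vegas_backlog(const struct vegas *vegas, u32 cwnd)
{
    u32 rtt = vegas->minRTT;  /* prop. delay + current queueing delay */

    return cwnd * (rtt - vegas->baseRTT) / vegas->baseRTT;
}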
ssize_t mdp4_lcdc_show_event(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    int cndx;
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;
    u64 vsync_tick;
    ktime_t timestamp;

    cndx = 0;
    vctrl = &vsync_ctrl_db[0];

    timestamp = vctrl->vsync_time;

    ret = wait_event_interruptible(vctrl->wait_queue,
            !ktime_equal(timestamp, vctrl->vsync_time) &&
            vctrl->vsync_irq_enabled);
    if (ret == -ERESTARTSYS)
        return ret;

    vsync_tick = ktime_to_ns(vctrl->vsync_time);
    ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
    /* scnprintf() already NUL-terminates buf */
    return ret;
}
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
    unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
    struct tick_sched *ts;
    ktime_t last_update, expires, now;
    struct clock_event_device *dev =
        __get_cpu_var(tick_cpu_device).evtdev;
    u64 time_delta;
    int cpu;

    local_irq_save(flags);

    cpu = smp_processor_id();
    ts = &per_cpu(tick_cpu_sched, cpu);

    /*
     * Call to tick_nohz_start_idle stops the last_update_time from being
     * updated. Thus, it must not be called in the event we are called from
     * irq_exit() with the prior state different than idle.
     */
    if (!inidle && !ts->inidle)
        goto end;

    /*
     * Set ts->inidle unconditionally. Even if the system did not
     * switch to NOHZ mode the cpu frequency governors rely on the
     * update of the idle time accounting in tick_nohz_start_idle().
     */
    ts->inidle = 1;

    now = tick_nohz_start_idle(ts);

    /*
     * If this cpu is offline and it is the one which updates
     * jiffies, then give up the assignment and let it be taken by
     * the cpu which runs the tick timer next. If we don't drop
     * this here the jiffies might be stale and do_timer() never
     * invoked.
     */
    if (unlikely(!cpu_online(cpu))) {
        if (cpu == tick_do_timer_cpu)
            tick_do_timer_cpu = TICK_DO_TIMER_NONE;
    }

    if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
        goto end;

    if (need_resched())
        goto end;

    if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
        static int ratelimit;

        if (ratelimit < 10) {
            printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
                   local_softirq_pending());
            ratelimit++;
        }
        goto end;
    }

    ts->idle_calls++;
    /* Read jiffies and the time when jiffies were updated last */
    do {
        seq = read_seqbegin(&xtime_lock);
        last_update = last_jiffies_update;
        last_jiffies = jiffies;

        /*
         * On SMP we really should only care for the CPU which
         * has the do_timer duty assigned. All other CPUs can
         * sleep as long as they want.
         */
        if (cpu == tick_do_timer_cpu ||
            tick_do_timer_cpu == TICK_DO_TIMER_NONE)
            time_delta = timekeeping_max_deferment();
        else
            time_delta = KTIME_MAX;
    } while (read_seqretry(&xtime_lock, seq));

    if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
        arch_needs_cpu(cpu)) {
        next_jiffies = last_jiffies + 1;
        delta_jiffies = 1;
    } else {
        /* Get the next timer wheel timer */
        next_jiffies = get_next_timer_interrupt(last_jiffies);
        delta_jiffies = next_jiffies - last_jiffies;
    }

    /*
     * Do not stop the tick, if we are only one off
     * or if the cpu is required for rcu
     */
    if (!ts->tick_stopped && delta_jiffies == 1)
        goto out;

    /* Schedule the tick, if we are at least one jiffie off */
    if ((long)delta_jiffies >= 1) {
        /*
         * calculate the expiry time for the next timer wheel
         * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
         * that there is no timer pending or at least extremely
         * far into the future (12 days for HZ=1000). In this
         * case we set the expiry to the end of time.
         */
        if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
            /*
             * Calculate the time delta for the next timer event.
             * If the time delta exceeds the maximum time delta
             * permitted by the current clocksource then adjust
             * the time delta accordingly to ensure the
             * clocksource does not wrap.
             */
            time_delta = min_t(u64, time_delta,
                       tick_period.tv64 * delta_jiffies);
            expires = ktime_add_ns(last_update, time_delta);
        } else {
            expires.tv64 = KTIME_MAX;
        }

        /*
         * If this cpu is the one which updates jiffies, then
         * give up the assignment and let it be taken by the
         * cpu which runs the tick timer next, which might be
         * this cpu as well. If we don't drop this here the
         * jiffies might be stale and do_timer() never
         * invoked.
         */
        if (cpu == tick_do_timer_cpu)
            tick_do_timer_cpu = TICK_DO_TIMER_NONE;

        if (delta_jiffies > 1)
            cpumask_set_cpu(cpu, nohz_cpu_mask);

        /* Skip reprogram of event if it's not changed */
        if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
            goto out;

        /*
         * nohz_stop_sched_tick can be called several times before
         * the nohz_restart_sched_tick is called. This happens when
         * interrupts arrive which do not cause a reschedule. In the
         * first call we save the current tick time, so we can restart
         * the scheduler tick in nohz_restart_sched_tick.
         */
        if (!ts->tick_stopped) {
            if (select_nohz_load_balancer(1)) {
                /*
                 * sched tick not stopped!
                 */
                cpumask_clear_cpu(cpu, nohz_cpu_mask);
                goto out;
            }

            ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
            ts->tick_stopped = 1;
            ts->idle_jiffies = last_jiffies;
            rcu_enter_nohz();
        }

        ts->idle_sleeps++;

        /* Mark expires */
        ts->idle_expires = expires;

        /*
         * If the expiration time == KTIME_MAX, then
         * in this case we simply stop the tick timer.
         */
        if (unlikely(expires.tv64 == KTIME_MAX)) {
            if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                hrtimer_cancel(&ts->sched_timer);
            goto out;
        }

        if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
            hrtimer_start(&ts->sched_timer, expires,
                      HRTIMER_MODE_ABS_PINNED);
            /* Check, if the timer was already in the past */
            if (hrtimer_active(&ts->sched_timer))
                goto out;
        } else if (!tick_program_event(expires, 0))
            goto out;

        /*
         * We are past the event already. So we crossed a
         * jiffie boundary. Update jiffies and raise the
         * softirq.
         */
        tick_do_update_jiffies64(ktime_get());
        cpumask_clear_cpu(cpu, nohz_cpu_mask);
    }
    raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
    ts->next_jiffies = next_jiffies;
    ts->last_jiffies = last_jiffies;
    ts->sleep_length = ktime_sub(dev->next_event, now);
end:
    local_irq_restore(flags);
}
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
    unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
    unsigned long rcu_delta_jiffies;
    ktime_t last_update, expires, now;
    struct clock_event_device *dev =
        __get_cpu_var(tick_cpu_device).evtdev;
    u64 time_delta;
    int cpu;

    cpu = smp_processor_id();
    ts = &per_cpu(tick_cpu_sched, cpu);
    now = tick_nohz_start_idle(cpu, ts);

    if (unlikely(!cpu_online(cpu))) {
        if (cpu == tick_do_timer_cpu)
            tick_do_timer_cpu = TICK_DO_TIMER_NONE;
    }

    if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
        return;

    if (need_resched())
        return;

    if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
        static int ratelimit;

        if (ratelimit < 10) {
            printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
                   (unsigned int) local_softirq_pending());
            ratelimit++;
        }
        return;
    }

    ts->idle_calls++;

    /* Read jiffies and the time when jiffies were updated last */
    do {
        seq = read_seqbegin(&xtime_lock);
        last_update = last_jiffies_update;
        last_jiffies = jiffies;
        time_delta = timekeeping_max_deferment();
    } while (read_seqretry(&xtime_lock, seq));

    if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
        arch_needs_cpu(cpu)) {
        next_jiffies = last_jiffies + 1;
        delta_jiffies = 1;
    } else {
        next_jiffies = get_next_timer_interrupt(last_jiffies);
        delta_jiffies = next_jiffies - last_jiffies;
        /* RCU may need to wake this CPU sooner than the timer wheel */
        if (rcu_delta_jiffies < delta_jiffies) {
            next_jiffies = last_jiffies + rcu_delta_jiffies;
            delta_jiffies = rcu_delta_jiffies;
        }
    }

    if (!ts->tick_stopped && delta_jiffies == 1)
        goto out;

    if ((long)delta_jiffies >= 1) {
        if (cpu == tick_do_timer_cpu) {
            tick_do_timer_cpu = TICK_DO_TIMER_NONE;
            ts->do_timer_last = 1;
        } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
            time_delta = KTIME_MAX;
            ts->do_timer_last = 0;
        } else if (!ts->do_timer_last) {
            time_delta = KTIME_MAX;
        }

        if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
            time_delta = min_t(u64, time_delta,
                       tick_period.tv64 * delta_jiffies);
        }

        if (time_delta < KTIME_MAX)
            expires = ktime_add_ns(last_update, time_delta);
        else
            expires.tv64 = KTIME_MAX;

        if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
            goto out;

        if (!ts->tick_stopped) {
            select_nohz_load_balancer(1);
            ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
            ts->tick_stopped = 1;
            ts->idle_jiffies = last_jiffies;
        }

        ts->idle_sleeps++;
        ts->idle_expires = expires;

        if (unlikely(expires.tv64 == KTIME_MAX)) {
            if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                hrtimer_cancel(&ts->sched_timer);
            goto out;
        }

        if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
            hrtimer_start(&ts->sched_timer, expires,
                      HRTIMER_MODE_ABS_PINNED);
            if (hrtimer_active(&ts->sched_timer))
                goto out;
        } else if (!tick_program_event(expires, 0))
            goto out;

        tick_do_update_jiffies64(ktime_get());
    }
    raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
    ts->next_jiffies = next_jiffies;
    ts->last_jiffies = last_jiffies;
}
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
    unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
    ktime_t last_update, expires, now;
    struct clock_event_device *dev =
        __get_cpu_var(tick_cpu_device).evtdev;
    u64 time_delta;
    int cpu;

    cpu = smp_processor_id();
    ts = &per_cpu(tick_cpu_sched, cpu);
    now = tick_nohz_start_idle(cpu, ts);

    /*
     * If this cpu is offline and it is the one which updates
     * jiffies, then give up the assignment and let it be taken by
     * the cpu which runs the tick timer next. If we don't drop
     * this here the jiffies might be stale and do_timer() never
     * invoked.
     */
    if (unlikely(!cpu_online(cpu))) {
        if (cpu == tick_do_timer_cpu)
            tick_do_timer_cpu = TICK_DO_TIMER_NONE;
    }

    if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
        return;

    if (need_resched())
        return;

    if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
        static int ratelimit;

        if (ratelimit < 10) {
            printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
                   (unsigned int) local_softirq_pending());
            ratelimit++;
        }
        return;
    }

    ts->idle_calls++;
    /* Read jiffies and the time when jiffies were updated last */
    do {
        seq = read_seqbegin(&xtime_lock);
        last_update = last_jiffies_update;
        last_jiffies = jiffies;
        time_delta = timekeeping_max_deferment();
    } while (read_seqretry(&xtime_lock, seq));

    if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
        arch_needs_cpu(cpu)) {
        next_jiffies = last_jiffies + 1;
        delta_jiffies = 1;
    } else {
        /* Get the next timer wheel timer */
        next_jiffies = get_next_timer_interrupt(last_jiffies);
        delta_jiffies = next_jiffies - last_jiffies;
    }

    /*
     * Do not stop the tick, if we are only one off
     * or if the cpu is required for rcu
     */
    if (!ts->tick_stopped && delta_jiffies == 1)
        goto out;

    /* Schedule the tick, if we are at least one jiffie off */
    if ((long)delta_jiffies >= 1) {
        /*
         * If this cpu is the one which updates jiffies, then
         * give up the assignment and let it be taken by the
         * cpu which runs the tick timer next, which might be
         * this cpu as well. If we don't drop this here the
         * jiffies might be stale and do_timer() never
         * invoked. Keep track of the fact that it was the one
         * which had the do_timer() duty last. If this cpu is
         * the one which had the do_timer() duty last, we limit
         * the sleep time to the timekeeping max_deferment value
         * which we retrieved above. Otherwise we can sleep as
         * long as we want.
         */
        if (cpu == tick_do_timer_cpu) {
            tick_do_timer_cpu = TICK_DO_TIMER_NONE;
            ts->do_timer_last = 1;
        } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
            time_delta = KTIME_MAX;
            ts->do_timer_last = 0;
        } else if (!ts->do_timer_last) {
            time_delta = KTIME_MAX;
        }

        /*
         * calculate the expiry time for the next timer wheel
         * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
         * that there is no timer pending or at least extremely
         * far into the future (12 days for HZ=1000). In this
         * case we set the expiry to the end of time.
         */
        if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
            /*
             * Calculate the time delta for the next timer event.
             * If the time delta exceeds the maximum time delta
             * permitted by the current clocksource then adjust
             * the time delta accordingly to ensure the
             * clocksource does not wrap.
             */
            time_delta = min_t(u64, time_delta,
                       tick_period.tv64 * delta_jiffies);
        }

        if (time_delta < KTIME_MAX)
            expires = ktime_add_ns(last_update, time_delta);
        else
            expires.tv64 = KTIME_MAX;

        /* Skip reprogram of event if it's not changed */
        if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
            goto out;

        /*
         * nohz_stop_sched_tick can be called several times before
         * the nohz_restart_sched_tick is called. This happens when
         * interrupts arrive which do not cause a reschedule. In the
         * first call we save the current tick time, so we can restart
         * the scheduler tick in nohz_restart_sched_tick.
         */
        if (!ts->tick_stopped) {
            select_nohz_load_balancer(1);
            calc_load_enter_idle();

            ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
            ts->tick_stopped = 1;
            ts->idle_jiffies = last_jiffies;
        }

        ts->idle_sleeps++;

        /* Mark expires */
        ts->idle_expires = expires;

        /*
         * If the expiration time == KTIME_MAX, then
         * in this case we simply stop the tick timer.
         */
        if (unlikely(expires.tv64 == KTIME_MAX)) {
            if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                hrtimer_cancel(&ts->sched_timer);
            goto out;
        }

        if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
            hrtimer_start(&ts->sched_timer, expires,
                      HRTIMER_MODE_ABS_PINNED);
            /* Check, if the timer was already in the past */
            if (hrtimer_active(&ts->sched_timer))
                goto out;
        } else if (!tick_program_event(expires, 0))
            goto out;

        /*
         * We are past the event already. So we crossed a
         * jiffie boundary. Update jiffies and raise the
         * softirq.
         */
        tick_do_update_jiffies64(ktime_get());
    }
    raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
    ts->next_jiffies = next_jiffies;
    ts->last_jiffies = last_jiffies;
}
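The expiry clamp shared by the tick_nohz_stop_sched_tick() variants above is easier to see with numbers. A small sketch with assumed values: with HZ=1000 (tick_period = 1,000,000 ns), delta_jiffies = 5, and a clocksource deferment limit of 4,000,000 ns, the min_t() picks the clocksource limit, so the tick is reprogrammed 4 ms after last_update rather than at the 5 ms timer-wheel event:

/* Sketch of the clamp applied above (function name hypothetical): the
 * sleep is bounded both by the next timer-wheel event and by how far
 * the clocksource can be deferred without wrapping. */
static u64 clamp_sleep_ns(u64 max_deferment_ns, u64 tick_period_ns,
                          unsigned long delta_jiffies)
{
    return min_t(u64, max_deferment_ns,
             tick_period_ns * delta_jiffies);
}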
static int vsync_timestamp_changed(struct hisi_fb_data_type *hisifd,
        ktime_t prev_timestamp)
{
    BUG_ON(hisifd == NULL);

    return !ktime_equal(prev_timestamp,
            hisifd->vsync_ctrl.vsync_timestamp);
}
static int s3cfb_vsync_timestamp_changed(struct s3cfb_global *fbdev,
        ktime_t prev_timestamp)
{
    rmb();  /* order the read against the vsync handler's update */
    return !ktime_equal(prev_timestamp, fbdev->vsync_timestamp);
}
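Both predicates reduce to ktime_equal(), which in kernels of this vintage (where ktime_t is still a union around a 64-bit scalar) is just a comparison of the tv64 representation; paraphrasing include/linux/ktime.h:

/* Paraphrased from include/linux/ktime.h of this era: two ktime_t
 * values are equal iff their 64-bit scalar forms match. */
static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
{
    return cmp1.tv64 == cmp2.tv64;
}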
/* Do RTT sampling (as in Vegas) plus queue-length estimation for SOD.
 * Basically we:
 *   o min-filter RTT samples from within an RTT to get the current
 *     propagation delay + queuing delay (we are min-filtering to try to
 *     avoid the effects of delayed ACKs)
 *   o min-filter RTT samples from a much longer window (forever for now)
 *     to find the propagation delay (baseRTT)
 */
void tcp_sod_delay_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct sod_delay *sod = inet_csk_ca(sk);
    u64 vrtt;
    u64 qd_plus_td, remain_qd;
    u16 est_ql = 0;

    if (ktime_equal(last, net_invalid_timestamp()))
        return;

    /* Never allow zero rtt or baseRTT */
    vrtt = ktime_to_us(net_timedelta(last)) + 1;

    /* Filter to find propagation delay: */
    if (vrtt < sod->baseRTT)
        sod->baseRTT = vrtt;

    /* queueing delay plus transmission delay */
    qd_plus_td = vrtt - sod->baseRTT;

    if (tp->td_last_index == 0)
        est_ql = 0;
    else {
        if (qd_plus_td < tp->ary_td[tp->td_last_index - 1].td_i)
            est_ql = 1;
        else {
            u32 td_index = tp->td_last_index - 1;

            remain_qd = qd_plus_td - tp->ary_td[td_index].td_i;
            while (remain_qd > 0 && td_index != tp->td_head_index) {
                /* wrap around the ring buffer of tx delays */
                if (tp->td_head_index >= tp->td_last_index &&
                    td_index == 0) {
                    if (tp->td_count > 1)
                        td_index = tp->td_count - 1;
                    else
                        break;
                } else
                    td_index--;

                if (remain_qd > tp->ary_td[td_index].td_i) {
                    remain_qd -= tp->ary_td[td_index].td_i;
                    est_ql++;
                } else {
                    est_ql++;
                    break;
                }
            }
            est_ql++;
        }
    }

    sod->curQL = est_ql;
    sod->minQL = min(sod->minQL, est_ql);

    /* Find the min RTT during the last RTT to find
     * the current prop. delay + queuing delay:
     */
    sod->minRTT = min(sod->minRTT, vrtt);
    sod->cntRTT++;
}
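The ring-buffer walk above answers one question: how many of the most recent per-packet transmission delays (tp->ary_td) must be summed to account for the measured queueing delay vrtt - baseRTT? A simplified, self-contained sketch of the same idea over a plain array (names hypothetical):

/* Simplified sketch of the queue-length estimate above: count how many
 * of the most recent per-packet transmission delays are needed to
 * cover the observed queueing delay. Names here are hypothetical. */
static unsigned int estimate_queue_len(const u64 *tx_delay, int n,
                                       u64 queueing_delay)
{
    unsigned int ql = 0;
    int i;

    /* walk backwards from the newest sample */
    for (i = n - 1; i >= 0 && queueing_delay > 0; i--) {
        ql++;
        if (queueing_delay <= tx_delay[i])
            break;
        queueing_delay -= tx_delay[i];
    }
    return ql;
}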