void iso_rx_rcp_update(struct iso_rx_context *rxctx)
{
	/* Based on rxctx->rx_rate, determine one advertised rate
	 * rxctx->rcp_rate.
	 */
	ktime_t now = ktime_get();
	u64 dt, rate;
	u32 cap, cap2;

	dt = ktime_us_delta(now, rxctx->last_rcp_time);
	if (dt < ISO_VQ_HRCP_US)
		return;

	if (!spin_trylock(&rxctx->vq_spinlock))
		return;

	dt = ktime_us_delta(now, rxctx->last_rcp_time);
	if (dt < ISO_VQ_HRCP_US)
		goto unlock;

	cap = ISO_VQ_DRAIN_RATE_MBPS;
	cap2 = ISO_VQ_DRAIN_RATE_MBPS << 1;

	rate = (u64)rxctx->rcp_rate * (cap2 + cap - rxctx->rx_rate) / cap2;
	rate = max_t(u64, ISO_MIN_RFAIR, rate);
	rate = min_t(u64, ISO_VQ_DRAIN_RATE_MBPS, rate);

	rxctx->rcp_rate = rate;
	rxctx->last_rcp_time = now;

unlock:
	spin_unlock(&rxctx->vq_spinlock);
}
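/*
 * A minimal sketch of the rate update above, not driver code: the factor
 * is the RCP-style rule rate' = rate * (2C + (C - y)) / 2C, where C is
 * the drain capacity in Mb/s and y the measured arrival rate.  With
 * C = 1000 and y = 1200 (20% over capacity), the rate shrinks by 10%:
 * rate * (2000 + 1000 - 1200) / 2000 = rate * 1800 / 2000.  The helper
 * name and min_rate floor are made up for illustration; it assumes
 * rx_mbps <= 3 * cap_mbps, as the driver's clamping implies.
 */
static u64 rcp_next_rate(u64 rate, u32 rx_mbps, u32 cap_mbps, u64 min_rate)
{
	u64 next = rate * (3ULL * cap_mbps - rx_mbps) / (2ULL * cap_mbps);

	return clamp_t(u64, next, min_rate, cap_mbps);
}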
void nvhost_scale3d_notify_idle(struct nvhost_module *mod)
{
	ktime_t t;
	unsigned long dt;

	mutex_lock(&scale3d.lock);

	if (!scale3d.enable)
		goto done;

	t = ktime_get();

	if (scale3d.is_idle) {
		dt = ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += dt;
		dt = ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += dt;
	} else
		scale3d.is_idle = 1;

	scale3d.last_idle = t;
	scale3d.last_short_term_idle = t;

	scaling_state_check(scale3d.last_idle);

	/* delay idle_max % of 2 * fast_response time (given in microseconds) */
	schedule_delayed_work(&scale3d.idle_timer,
		msecs_to_jiffies((scale3d.idle_max * scale3d.fast_response) / 50000));

done:
	mutex_unlock(&scale3d.lock);
}
void nvhost_scale3d_notify_busy(struct nvhost_module *mod)
{
	unsigned long idle;
	unsigned long short_term_idle;
	ktime_t t;

	mutex_lock(&scale3d.lock);

	if (!scale3d.enable)
		goto done;

	cancel_delayed_work(&scale3d.idle_timer);

	t = ktime_get();

	if (scale3d.is_idle) {
		idle = (unsigned long)
			ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += idle;
		short_term_idle =
			ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += short_term_idle;
		scale3d.is_idle = 0;
	}

	scaling_state_check(t);

done:
	mutex_unlock(&scale3d.lock);
}
static int multi_get(void *data, u64 *val)
{
	ktime_t finish;
	struct spin_multi_state ms;
	struct spin_multi_per_thread t1, t2;

	ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
	ms.loops = 1000000;

	atomic_set(&ms.start_wait, 2);
	atomic_set(&ms.enter_wait, 2);
	atomic_set(&ms.exit_wait, 2);
	t1.state = &ms;
	t2.state = &ms;

	kthread_run(multi_other, &t2, "multi_get");

	multi_other(&t1);

	finish = ktime_get();

	*val = ktime_us_delta(finish, t1.start);

	return 0;
}
static void xyref5260_lcd_on(void)
{
	s64 us = ktime_us_delta(lcd_on_time, ktime_get_boottime());

	if (us > LCD_POWER_OFF_TIME_US) {
		pr_warn("lcd on sleep time too long\n");
		us = LCD_POWER_OFF_TIME_US;
	}

	if (us > 0)
		usleep_range(us, us);

	s3c_gpio_setpull(EXYNOS5260_GPB2(0), S3C_GPIO_PULL_NONE);
	s3c_gpio_setpull(EXYNOS5260_GPD2(2), S3C_GPIO_PULL_NONE);
	gpio_request_one(EXYNOS5260_GPD2(2), GPIOF_OUT_INIT_HIGH, "GPD2");
	usleep_range(5000, 6000);
	gpio_free(EXYNOS5260_GPD2(2));

#ifndef CONFIG_BACKLIGHT_PWM
	s3c_gpio_setpull(EXYNOS5260_GPD2(1), S3C_GPIO_PULL_NONE);
	gpio_request_one(EXYNOS5260_GPD2(1), GPIOF_OUT_INIT_HIGH, "GPD2");
	usleep_range(5000, 6000);
	gpio_free(EXYNOS5260_GPD2(1));
#endif
}
static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
			unsigned int count)
{
	unsigned long flags;
	ktime_t edge;
	s64 delta;
	int i;

	spin_lock_irqsave(&hardware[type].lock, flags);
	if (type == IR_IRDEO) {
		/* DTR, RTS down */
		on();
	}

	edge = ktime_get();
	for (i = 0; i < count; i++) {
		if (i % 2)
			hardware[type].send_space();
		else
			hardware[type].send_pulse(txbuf[i], edge);
		edge = ktime_add_us(edge, txbuf[i]);
		delta = ktime_us_delta(edge, ktime_get());
		if (delta > 25) {
			/* sleep, allowing 25 us of scheduling slack */
			spin_unlock_irqrestore(&hardware[type].lock, flags);
			usleep_range(delta - 25, delta + 25);
			spin_lock_irqsave(&hardware[type].lock, flags);
		} else if (delta > 0) {
			udelay(delta);
		}
	}
	off();
	spin_unlock_irqrestore(&hardware[type].lock, flags);
	return count;
}
static int ss_get(void *data, u64 *val)
{
	ktime_t start, finish;
	int loops;
	int cont;
	DEFINE_RAW_SPINLOCK(ss_spin);

	loops = 1000000;
	cont = 1;

	start = ktime_get();

	while (cont) {
		raw_spin_lock(&ss_spin);
		loops--;
		if (loops == 0)
			cont = 0;
		raw_spin_unlock(&ss_spin);
	}

	finish = ktime_get();

	*val = ktime_us_delta(finish, start);

	return 0;
}
/* Some chips need a delay between accesses */
static inline void zl6100_wait(const struct zl6100_data *data)
{
	if (data->delay) {
		s64 delta = ktime_us_delta(ktime_get(), data->access);

		if (delta < data->delay)
			udelay(data->delay - delta);
	}
}
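/*
 * A minimal sketch of the intended calling pattern (hypothetical helper;
 * the real zl6100 driver wraps the pmbus accessors instead): wait out
 * whatever remains of the inter-access delay, do the transfer, then
 * restamp data->access so the next caller measures from this access.
 */
static int zl6100_read_word(struct i2c_client *client,
			    struct zl6100_data *data, u8 reg)
{
	int ret;

	zl6100_wait(data);
	ret = i2c_smbus_read_word_data(client, reg);
	data->access = ktime_get();

	return ret;
}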
static void scale3d_clocks(unsigned long percent)
{
	unsigned long hz, curr;
	int i = 0;
	ktime_t t;

	if (!tegra_is_clk_enabled(scale3d.clk_3d))
		return;

	if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
		if (!tegra_is_clk_enabled(scale3d.clk_3d2))
			return;

	curr = clk_get_rate(scale3d.clk_3d);
	hz = percent * (curr / 100);

	if (!(hz >= scale3d.max_rate_3d && curr == scale3d.max_rate_3d)) {
		if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
			clk_set_rate(scale3d.clk_3d2, 0);

		if (is_tegra_camera_on())
			clk_set_rate(scale3d.clk_3d, CAMERA_3D_CLK);
		else
			clk_set_rate(scale3d.clk_3d, hz);

		if (scale3d.p_scale_emc) {
			long after = (long) clk_get_rate(scale3d.clk_3d);
			hz = after * scale3d.emc_slope + scale3d.emc_offset;
			if (scale3d.p_emc_dip)
				hz -= (scale3d.emc_dip_slope *
					POW2(after / 1000 - scale3d.emc_xmid) +
					scale3d.emc_dip_offset);
			if (is_tegra_camera_on())
				clk_set_rate(scale3d.clk_3d_emc,
					     CAMERA_3D_EMC_CLK);
			else
				clk_set_rate(scale3d.clk_3d_emc, hz);
		}
	}

	t = ktime_get();
	hz = clk_get_rate(scale3d.clk_3d);
	if (hz != curr) {
		gpu_loading[curr_idx].total_time +=
			ktime_us_delta(t, gpu_loading[curr_idx].last_start);
		for (i = 0; i < FREQ_LEVEL; i++) {
			if (gpu_loading[i].freq == hz) {
				curr_idx = i;
				break;
			}
		}
		gpu_loading[curr_idx].last_start = t;
	}
}
void iso_rx_stats_update(struct iso_rx_context *rxctx, struct sk_buff *skb)
{
	int cpu = smp_processor_id();
	struct iso_rx_stats *rxstats = per_cpu_ptr(rxctx->stats, cpu);
	u64 dt;
	ktime_t now;
	int i;
	u64 rx_bytes;

	rxstats->rx_bytes += skb_size(skb);
	rxstats->rx_packets += 1;

	now = ktime_get();

	/* There are too many test-and-test-and-set things going on.
	 * Abstract it out as light lock? */
	dt = ktime_us_delta(now, rxctx->last_stats_update_time);
	if (dt < ISO_VQ_UPDATE_INTERVAL_US)
		return;

	if (!spin_trylock(&rxctx->vq_spinlock))
		return;

	dt = ktime_us_delta(now, rxctx->last_stats_update_time);
	if (dt < ISO_VQ_UPDATE_INTERVAL_US)
		goto unlock;

	rxctx->last_stats_update_time = now;
	rxctx->global_stats_last = rxctx->global_stats;
	rxctx->global_stats.rx_bytes = 0;
	rxctx->global_stats.rx_packets = 0;

	/* Quickly sum everything up */
	for_each_online_cpu(i) {
		struct iso_rx_stats *st = per_cpu_ptr(rxctx->stats, i);
		rxctx->global_stats.rx_bytes += st->rx_bytes;
		rxctx->global_stats.rx_packets += st->rx_packets;
	}

	/* bits per us = mbps */
	rx_bytes = (rxctx->global_stats.rx_bytes -
		    rxctx->global_stats_last.rx_bytes);
	rxctx->rx_rate = (rx_bytes << 3) / dt;

unlock:
	spin_unlock(&rxctx->vq_spinlock);
}
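/*
 * Unit check for the rate above: one bit per microsecond is one megabit
 * per second, so (delta_bytes << 3) / dt_us is already in Mb/s.  E.g.
 * 12500000 bytes over a 100000 us window: (12500000 * 8) / 100000 = 1000.
 */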
void p9stat_leave(ktime_t *t)
{
	int ar = atomic_read(&active_requests);
	ktime_t end = ktime_get();
	s64 delta = ktime_us_delta(end, *t);

	atomic_dec(&active_requests);
	p9stat_record(delta, ar);
}
/*
 * performance debug info
 */
void hva_dbg_perf_begin(struct hva_ctx *ctx)
{
	u64 div;
	u32 period;
	u32 bitrate;
	struct hva_ctx_dbg *dbg = &ctx->dbg;
	ktime_t prev = dbg->begin;

	dbg->begin = ktime_get();

	if (dbg->is_valid_period) {
		/* encoding period */
		div = (u64)ktime_us_delta(dbg->begin, prev);
		do_div(div, 100);
		period = (u32)div;
		dbg->min_period = min(period, dbg->min_period);
		dbg->max_period = max(period, dbg->max_period);
		dbg->total_period += period;
		dbg->cnt_period++;

		/*
		 * minimum and maximum bitrates are based on the
		 * encoding period values upon a window of 32 samples
		 */
		dbg->window_duration += period;
		dbg->cnt_window++;
		if (dbg->cnt_window >= 32) {
			/*
			 * bitrate in kbps = (size * 8 / 1000) /
			 *                   (duration / 10000)
			 *                 = size * 80 / duration
			 */
			if (dbg->window_duration > 0) {
				div = (u64)dbg->window_stream_size * 80;
				do_div(div, dbg->window_duration);
				bitrate = (u32)div;
				dbg->last_bitrate = bitrate;
				dbg->min_bitrate = min(bitrate,
						       dbg->min_bitrate);
				dbg->max_bitrate = max(bitrate,
						       dbg->max_bitrate);
			}
			dbg->window_stream_size = 0;
			dbg->window_duration = 0;
			dbg->cnt_window = 0;
		}
	}

	/*
	 * filter sequences valid for performance:
	 * - begin/begin (no stream available) is an invalid sequence
	 * - begin/end is a valid sequence
	 */
	dbg->is_valid_period = false;
}
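/*
 * Worked example for the bitrate formula above (period is stored in units
 * of 100 us, so duration = 10000 corresponds to one second): a window of
 * 125000 bytes encoded over duration = 10000 gives
 * 125000 * 80 / 10000 = 1000 kbps.
 */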
void nvhost_scale3d_notify_idle(struct nvhost_device *dev)
{
	ktime_t t;
	unsigned long dt;

	if (!scale3d.enable)
		return;

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (scale3d.p_use_throughput_hint) {
		t = ktime_get();
		if (ktime_us_delta(t, scale3d.last_throughput_hint) < 1000000)
			return;
	}

	mutex_lock(&scale3d.lock);

	t = ktime_get();

	if (scale3d.is_idle) {
		dt = ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += dt;
		dt = ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += dt;
	} else {
		scale3d.is_idle = 1;
		gpu_loading[curr_idx].busy_time +=
			ktime_us_delta(t, scale3d.last_busy);
	}

	scale3d.last_idle = t;
	scale3d.last_short_term_idle = t;

	scaling_state_check(scale3d.last_idle);

	/* delay idle_max % of 2 * fast_response time (given in microseconds) */
	schedule_delayed_work(&scale3d.idle_timer,
		msecs_to_jiffies((scale3d.idle_max * scale3d.fast_response) / 50000));

	mutex_unlock(&scale3d.lock);
}
/*
 * GPIO ISR
 * State machine for reading the sensor request.
 * Hopefully the hardware performs some filtering.
 */
static irqreturn_t read_isr(int irq, void *data)
{
	ktime_t now = ktime_get_real();
	static int bit_count, char_count;

	switch (_read_req) {
	case READ_START:
		if (gpio_get_value(_pin) == 0)
			_read_req = READ_START_HIGH;
		break;
	case READ_START_HIGH:
		if (gpio_get_value(_pin) == 1)
			_read_req = READ_BIT_START;
		break;
	case READ_BIT_START:
		if (gpio_get_value(_pin) == 0) {
			_read_req = READ_BIT_HIGH;
			bit_count = 7;
			char_count = 0;
			memset(_data, 0, sizeof(_data));
		}
		break;
	case READ_BIT_HIGH:
		if (gpio_get_value(_pin) == 1)
			_read_req = READ_BIT_LOW;
		break;
	case READ_BIT_LOW:
		if (gpio_get_value(_pin) == 0) {
			_ulen = ktime_us_delta(now, _old);
			if (_ulen > 40)
				_data[char_count] |= (1 << bit_count);
			if (--bit_count < 0) {
				char_count++;
				bit_count = 7;
			}
			if (char_count == 5) {
				_read_req = READ_STOP;
				wake_up_interruptible(&_queue);
			} else {
				_read_req = READ_BIT_HIGH;
			}
		}
		break;
	case READ_STOP:
	default:
		break;
	}
	_old = now;
	return IRQ_HANDLED;
}
static int wait_and_poll(struct issp_host *host)
{
	ktime_t start;

	pin_data_in(host);

	/* wait for the data pin to go high */
	start = ktime_get();
	while (1) {
		pin_clk_lo(host);
		if (pin_data(host))
			break;
		pin_clk_hi(host);
		if (ktime_us_delta(ktime_get(), start) >=
		    ISSP_DATA_TRANS_TIMEOUT) {
			pin_data_z(host);
			pin_clk_lo(host);
			dev_err(&host->pdev->dev, "Poll high timeout!\n");
			return -ETIMEDOUT;
		}
	}

	/* wait for the data pin to go low to finish running the vector */
	start = ktime_get();
	while (1) {
		if (!pin_data(host))
			break;
		if (ktime_us_delta(ktime_get(), start) >=
		    ISSP_DATA_TRANS_TIMEOUT) {
			pin_data_z(host);
			dev_err(&host->pdev->dev, "Poll low timeout!\n");
			return -ETIMEDOUT;
		}
	}

	send_vector(host, vec_wait_and_poll, bits_wait_and_poll);

	return 0;
}
/* The EMC registers have shadow registers. When the EMC clock is updated
 * in the clock controller, the shadow registers are copied to the active
 * registers, allowing glitchless memory bus frequency changes.
 * This function updates the shadow registers for a new clock frequency,
 * and relies on the clock lock on the emc clock to avoid races between
 * multiple frequency changes. In addition access lock prevents concurrent
 * access to EMC registers from reading MRR registers */
int tegra_emc_set_rate(unsigned long rate)
{
	int i;
	u32 clk_setting;
	const struct tegra11_emc_table *last_timing;
	unsigned long flags;
	s64 last_change_delay;

	if (!tegra_emc_table)
		return -EINVAL;

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (tegra_emc_table[i].rate == rate)
			break;
	}

	if (i >= tegra_emc_table_size)
		return -EINVAL;

	if (!emc_timing) {
		/* can not assume that boot timing matches dfs table even
		   if boot frequency matches one of the table nodes */
		emc_get_timing(&start_timing);
		last_timing = &start_timing;
	} else
		last_timing = emc_timing;

	clk_setting = tegra_emc_clk_sel[i].value;

	last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
	if ((last_change_delay >= 0) &&
	    (last_change_delay < clkchange_delay))
		udelay(clkchange_delay - (int)last_change_delay);

	spin_lock_irqsave(&emc_access_lock, flags);
	emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
	clkchange_time = ktime_get();
	emc_timing = &tegra_emc_table[i];
	spin_unlock_irqrestore(&emc_access_lock, flags);

	emc_last_stats_update(i);

	pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

	return 0;
}
void nvhost_scale3d_notify_busy(struct nvhost_device *dev)
{
	unsigned long idle;
	unsigned long short_term_idle;
	ktime_t t;

	if (!scale3d.enable)
		return;

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (scale3d.p_use_throughput_hint) {
		t = ktime_get();
		if (ktime_us_delta(t, scale3d.last_throughput_hint) < 1000000)
			return;
	}

	mutex_lock(&scale3d.lock);

	cancel_delayed_work(&scale3d.idle_timer);

	t = ktime_get();

	if (scale3d.is_idle) {
		idle = (unsigned long)
			ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += idle;
		short_term_idle =
			ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += short_term_idle;
		scale3d.is_idle = 0;
	} else {
		gpu_loading[curr_idx].busy_time +=
			ktime_us_delta(t, scale3d.last_busy);
	}

	scale3d.last_busy = t;

	scaling_state_check(t);

	mutex_unlock(&scale3d.lock);
}
static void update_load_estimate(struct devfreq *df)
{
	struct podgov_info_rec *podgov = df->data;
	unsigned long window;
	unsigned long t;
	ktime_t now = ktime_get();

	t = ktime_us_delta(now, podgov->last_notification);

	/* if the last event was over GR3D_TIMEFRAME usec ago (1 sec), the
	 * current load tracking data is probably stale */
	if (t > GR3D_TIMEFRAME) {
		podgov->last_notification = now;
		podgov->estimation_window = now;
		podgov->last_estimation_window = now;
		podgov->total_idle = 0;
		podgov->last_total_idle = 0;
		podgov->idle_estimate =
			(podgov->last_event_type == DEVICE_IDLE) ? 1000 : 0;
		return;
	}

	podgov->last_notification = now;

	window = ktime_us_delta(now, podgov->last_estimation_window);
	/* prevent division by 0 if events come in less than 1 usec apart */
	if (window > 0)
		podgov->idle_estimate =
			(1000 * podgov->last_total_idle) / window;

	/* move up to the last estimation window */
	if (ktime_us_delta(now, podgov->estimation_window) >
	    podgov->p_estimation_window) {
		podgov->last_estimation_window = podgov->estimation_window;
		podgov->last_total_idle = podgov->total_idle;
		podgov->total_idle = 0;
		podgov->estimation_window = now;
	}
}
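/*
 * idle_estimate above is in per-mille (1000 * idle time / window length):
 * e.g. 30000 us of idle within a 100000 us estimation window gives
 * idle_estimate = 1000 * 30000 / 100000 = 300, i.e. 30% idle.
 */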
static int s5p_dp_notify(struct notifier_block *nb,
	unsigned long action, void *data)
{
	struct s5p_dp_device *dp;
	int ret = 0;
	ktime_t start;
#if defined(CONFIG_V1A) || defined(CONFIG_V2A) || defined(CONFIG_CHAGALL)
	struct fb_event event;
#endif

	dp = container_of(nb, struct s5p_dp_device, notifier);

	switch (action) {
	case FB_EVENT_PSR_ENTER:
#if defined(CONFIG_V1A) || defined(CONFIG_V2A) || defined(CONFIG_CHAGALL)
		fb_notifier_call_chain(FB_EVENT_PSR_WACOM_CHECK, &event);
#endif
		dev_dbg(dp->dev, "FB_EVENT_PSR_ENTER occurs!\n");
		start = ktime_get();
		ret = s5p_dp_psr_enter(dp);
		dev_info(dp->dev, "FB_EVENT_PSR_ENTER time = %lld us\n",
			 ktime_us_delta(ktime_get(), start));
		break;
	case FB_EVENT_PSR_PRE_ENTRY:
		dev_dbg(dp->dev, "FB_EVENT_PSR_PRE_ENTRY occurs!\n");
		ret = s5p_dp_psr_pre_entry(dp);
		break;
	case FB_EVENT_PSR_EXIT:
		dev_dbg(dp->dev, "FB_EVENT_PSR_EXIT occurs!\n");
		dp->psr_exit_state = PSR_PRE_EXIT;
		start = ktime_get();
		ret = s5p_dp_psr_exit(dp);
		dev_info(dp->dev, "FB_EVENT_PSR_EXIT time = %lld us\n",
			 ktime_us_delta(ktime_get(), start));
		break;
	}

	return ret;
}
static int loading_show(struct seq_file *s, void *unused)
{
	ktime_t t = ktime_get();
	int i = 0;

	gpu_loading[curr_idx].total_time +=
		ktime_us_delta(t, gpu_loading[curr_idx].last_start);

	for (i = 0; i < FREQ_LEVEL; i++)
		seq_printf(s, "%d 0\n", gpu_loading[i].freq);

	return 0;
}
static unsigned long scaling_state_check(struct devfreq *df, ktime_t time)
{
	struct podgov_info_rec *podgov = df->data;
	unsigned long dt;
	long max_boost, load, damp, freq, boost, res;

	dt = (unsigned long) ktime_us_delta(time, podgov->last_scale);

	if (dt < podgov->p_block_window || df->previous_freq == 0)
		return 0;

	/* convert to mhz to avoid overflow */
	freq = df->previous_freq / 1000000;
	max_boost = (df->max_freq / 3) / 1000000;

	/* calculate and trace load */
	load = 1000 - podgov->idle_avg;
	trace_podgov_busy(load);
	damp = podgov->p_damp;

	if ((1000 - podgov->idle) > podgov->p_load_max) {
		/* if too busy, scale up max/3, do not damp */
		boost = max_boost;
		damp = 10;
	} else {
		/* boost = bias * freq * (load - target)/target */
		boost = (load - podgov->p_load_target);
		boost *= (podgov->p_bias * freq);
		boost /= (100 * podgov->p_load_target);

		/* clamp to max boost */
		boost = (boost < max_boost) ? boost : max_boost;
	}

	/* calculate new request */
	res = freq + boost;

	/* Maintain average request */
	podgov->freq_avg = (podgov->freq_avg * podgov->p_smooth) + res;
	podgov->freq_avg /= (podgov->p_smooth + 1);

	/* Applying damping to frequencies */
	res = ((damp * res) + ((10 - damp) * podgov->freq_avg)) / 10;

	/* Convert to hz, limit, and apply */
	res = res * 1000000;
	scaling_limit(df, &res);
	trace_podgov_scaling_state_check(df->previous_freq, res);
	return res;
}
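/*
 * Worked example for the boost formula above, with hypothetical values:
 * freq = 300 MHz, load = 800 (per-mille busy), p_load_target = 700,
 * p_bias = 80.  Then
 *   boost = (800 - 700) * 80 * 300 / (100 * 700) = 34 MHz,
 * so the undamped request is 334 MHz; damping then blends damp/10 parts
 * of the new request with (10 - damp)/10 parts of the smoothed average.
 */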
irqreturn_t dht22_handler(int irq, void *dev_id)
{
	ktime_t currTime;
	struct device *dev;
	struct dht22_priv *priv;
	s64 timeDiff;
	int currBit;

	currTime = ktime_get();
	dev = (struct device *)dev_id;
	priv = dev_get_drvdata(dev);
	timeDiff = ktime_us_delta(currTime, priv->lastIntTime);

	trace_printk("tD = %lld us, state = %d, byte.bit = %u.%u, data = %x:%x:%x:%x:%x\n",
		     (long long)timeDiff, priv->state,
		     priv->byteCount, priv->bitCount,
		     priv->rh_int, priv->rh_dec,
		     priv->t_int, priv->t_dec, priv->checksum);

	switch (priv->state) {
	case READY:
		priv->state = START;
		break;
	case START:
		priv->state = WARMUP;
		break;
	case WARMUP:
		priv->state = DATA_READ;
		break;
	case DATA_READ:
		currBit = (timeDiff < 100) ? 0 : 1;
		priv->data[priv->byteCount] |= currBit << priv->bitCount;
		priv->bitCount--;
		if (priv->bitCount < 0) {
			priv->byteCount++;
			priv->bitCount = 7;
		}
		if (priv->byteCount > 4) {
			priv->state = DONE;
			wake_up(&acquisition);
		}
		if (timeDiff > 140)
			currTime = ktime_sub_us(currTime, timeDiff - 140);
		break;
	case DONE:
		dev_err(dev, "Interrupt occurred while state is DONE\n");
	}

	priv->lastIntTime = currTime;

	return IRQ_HANDLED;
}
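/*
 * A minimal sketch of what the reader side could do with priv->data once
 * the handler above reaches DONE (hypothetical helper; the DHT22 protocol
 * defines the fifth byte as the low 8 bits of the sum of the first four):
 */
static bool dht22_checksum_ok(const u8 data[5])
{
	return ((data[0] + data[1] + data[2] + data[3]) & 0xff) == data[4];
}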
static void update_load_estimate_actmon(struct nvhost_device_profile *profile)
{
	ktime_t t;
	unsigned long dt;
	u32 busy_time;

	t = ktime_get();
	dt = ktime_us_delta(t, profile->last_event_time);

	profile->dev_stat.total_time = dt;
	profile->last_event_time = t;

	actmon_op().read_avg_norm(profile->actmon, &busy_time);
	profile->dev_stat.busy_time = (busy_time * dt) / 1000;
}
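/*
 * Given the divide by 1000 above, read_avg_norm evidently reports
 * activity normalized to 0..1000 (per-mille), so busy_time comes out in
 * the same microsecond units as dt: a norm of 250 over dt = 20000 us
 * gives busy_time = 250 * 20000 / 1000 = 5000 us.
 */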
static void manta_lcd_on(void)
{
	s64 us = ktime_us_delta(lcd_on_time, ktime_get_boottime());

	if (us > LCD_POWER_OFF_TIME_US) {
		pr_warn("lcd on sleep time too long\n");
		us = LCD_POWER_OFF_TIME_US;
	}

	if (us > 0)
		usleep_range(us, us);

	gpio_set_value(GPIO_LCD_EN, 1);
	usleep_range(200000, 200000);
}
static snd_pcm_uframes_t
dummy_hrtimer_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
	u64 delta;
	u32 pos;

	delta = ktime_us_delta(hrtimer_cb_get_time(&dpcm->timer),
			       dpcm->base_time);
	delta = div_u64(delta * runtime->rate + 999999, 1000000);
	div_u64_rem(delta, runtime->buffer_size, &pos);
	return pos;
}
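/*
 * The + 999999 rounds the frame count up before the divide: elapsed
 * microseconds times the sample rate, divided by 1e6, gives frames.
 * E.g. at 48000 Hz with 500 us elapsed:
 * (500 * 48000 + 999999) / 1000000 = 24 frames, which is then reduced
 * modulo buffer_size to a position within the ring buffer.
 */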
static unsigned long scaling_state_check(struct devfreq *df, ktime_t time)
{
	struct podgov_info_rec *podgov = df->data;
	unsigned long dt;

	/* adjustment: set scale parameters (idle_min, idle_max) +/- 25%
	 * based on ratio of scale up to scale down hints */
	if (podgov->p_adjust)
		scaling_adjust(podgov, time);
	else {
		podgov->idle_min = podgov->p_idle_min;
		podgov->idle_max = podgov->p_idle_max;
	}

	dt = (unsigned long) ktime_us_delta(time, podgov->last_scale);
	if (dt < podgov->p_estimation_window)
		return 0;

	podgov->last_scale = time;

	/* if too busy, scale up */
	if (podgov->idle_estimate < podgov->idle_min) {
		podgov->is_scaled = 0;
		podgov->fast_up_count++;
		trace_podgov_busy(1000 - podgov->idle_estimate);
		trace_podgov_scaling_state_check(df->previous_freq,
						 df->max_freq);
		return df->max_freq;
	}

	trace_podgov_idle(podgov->idle_estimate);

	if (podgov->idle_estimate > podgov->idle_max) {
		if (!podgov->is_scaled)
			podgov->is_scaled = 1;
		podgov->slow_down_count++;
		/* if idle time is high, clock down */
		podgov->scale =
			100 - (podgov->idle_estimate - podgov->idle_min) / 10;
		schedule_work(&podgov->work);
	}

	return 0;
}
/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}
void bdisp_dbg_perf_end(struct bdisp_dev *bdisp)
{
	s64 time_us;

	time_us = ktime_us_delta(ktime_get(), bdisp->dbg.hw_start);

	if (!bdisp->dbg.min_duration)
		bdisp->dbg.min_duration = time_us;
	else
		bdisp->dbg.min_duration = min(time_us,
					      bdisp->dbg.min_duration);

	bdisp->dbg.last_duration = time_us;
	bdisp->dbg.max_duration = max(time_us, bdisp->dbg.max_duration);
	bdisp->dbg.tot_duration += time_us;
}
static void scaling_adjust(struct podgov_info_rec *podgov, ktime_t time)
{
	long hint_ratio;
	int idle_min_adjustment;
	int idle_max_adjustment;
	unsigned long dt;

	dt = (unsigned long) ktime_us_delta(time, podgov->last_adjust);
	if (dt < SCALING_ADJUST_PERIOD)
		return;

	hint_ratio = (100 * (podgov->fast_up_count + 1)) /
		     (podgov->slow_down_count + 1);

	if (hint_ratio > HINT_RATIO_MAX) {
		idle_min_adjustment = podgov->p_idle_min;
		idle_max_adjustment = podgov->p_idle_max;
	} else if (hint_ratio < HINT_RATIO_MIN) {
		idle_min_adjustment = -((int) podgov->p_idle_min) / 2;
		idle_max_adjustment = -((int) podgov->p_idle_max) / 2;
	} else {
		int diff;
		int factor;

		diff = HINT_RATIO_MID - hint_ratio;
		if (diff < 0)
			factor = -diff * 2;
		else {
			factor = -diff;
			diff *= 2;
		}

		idle_min_adjustment =
			(factor * (int) podgov->p_idle_min) / HINT_RATIO_DIFF;
		idle_max_adjustment =
			(factor * (int) podgov->p_idle_max) / HINT_RATIO_DIFF;
	}

	podgov->idle_min = podgov->p_idle_min + idle_min_adjustment;
	podgov->idle_max = podgov->p_idle_max + idle_max_adjustment;

	trace_podgov_stats(podgov->fast_up_count, podgov->slow_down_count,
			   podgov->idle_min, podgov->idle_max);

	podgov->fast_up_count = 0;
	podgov->slow_down_count = 0;
	podgov->last_adjust = time;
}