void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
                            struct skb_shared_hwtstamps *hwts,
                            u64 timestamp)
{
        u64 nsec;

        nsec = timecounter_cyc2time(&mdev->clock, timestamp);

        memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
        hwts->hwtstamp = ns_to_ktime(nsec);
}

static ssize_t k3g_show_delay(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct k3g_data *k3g_data = dev_get_drvdata(dev);
        u64 delay;

        delay = k3g_data->time_to_read * k3g_data->entries;
        delay = ktime_to_ns(ns_to_ktime(delay));

        return sprintf(buf, "%lld\n", delay);
}

void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
                        struct skb_shared_hwtstamps *hwts)
{
        u64 nsec;

        read_lock(&tstamp->lock);
        nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
        read_unlock(&tstamp->lock);

        hwts->hwtstamp = ns_to_ktime(nsec);
}

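/*
 * The fill-hwtstamp helpers above assume a timecounter has already been set
 * up over the NIC's free-running cycle counter. A minimal sketch of that
 * setup follows, using the modern <linux/timecounter.h> API; my_hw_read_counter,
 * the 48-bit counter width, and the shift of 14 are all assumptions, not
 * taken from the drivers above.
 */
#include <linux/clocksource.h>
#include <linux/ktime.h>
#include <linux/timecounter.h>

/* Hypothetical device accessor: read the free-running hardware counter. */
static u64 my_hw_read(const struct cyclecounter *cc)
{
        return my_hw_read_counter();    /* assumed device register read */
}

static struct cyclecounter my_cc = {
        .read = my_hw_read,
        .mask = CLOCKSOURCE_MASK(48),   /* assumed 48-bit counter width */
};

static struct timecounter my_tc;

static void my_clock_init(u32 dev_clk_khz)
{
        my_cc.shift = 14;               /* assumed precision/overflow trade-off */
        my_cc.mult = clocksource_khz2mult(dev_clk_khz, my_cc.shift);
        /* start the nanosecond timeline at the current wall-clock time */
        timecounter_init(&my_tc, &my_cc, ktime_to_ns(ktime_get_real()));
}
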
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void set_polling_delay(struct k3g_data *k3g_data, int res)
{
        s64 delay_ns;

        /* entries still missing from the FIFO, then scaled to nanoseconds */
        delay_ns = k3g_data->entries + 1 - res;
        if (delay_ns < 0)
                delay_ns = 0;

        delay_ns = delay_ns * k3g_data->time_to_read;
        k3g_data->polling_delay = ns_to_ktime(delay_ns);
}

int secos_booster_start(enum secos_boost_policy policy)
{
        int ret = 0;
        int freq;

        current_core = mc_active_core();

        /* migrate to big core */
        if ((policy != MAX_PERFORMANCE) && (policy != MID_PERFORMANCE) &&
            (policy != MIN_PERFORMANCE)) {
                pr_err("%s: wrong secos boost policy:%d\n", __func__, policy);
                ret = -EINVAL;
                goto error;
        }

        /* cpufreq configuration */
        if (policy == MAX_PERFORMANCE)
                freq = max_cpu_freq;
        else if (policy == MID_PERFORMANCE)
                freq = MID_CPUFREQ;
        else
                freq = 0;
        pm_qos_update_request(&secos_booster_qos, freq); /* KHz */

        if (!cpu_online(DEFAULT_BIG_CORE)) {
                pr_debug("%s: %d core is offline\n", __func__, DEFAULT_BIG_CORE);
                udelay(100);
                if (!cpu_online(DEFAULT_BIG_CORE)) {
                        pr_debug("%s: %d core is offline\n", __func__,
                                 DEFAULT_BIG_CORE);
                        pm_qos_update_request(&secos_booster_qos, 0);
                        ret = -EPERM;
                        goto error;
                }
                pr_debug("%s: %d core is online\n", __func__, DEFAULT_BIG_CORE);
        }

        ret = mc_switch_core(DEFAULT_BIG_CORE);
        if (ret) {
                pr_err("%s: mc switch failed : err:%d\n", __func__, ret);
                pm_qos_update_request(&secos_booster_qos, 0);
                goto error;
        }

        /* Change schedule policy */
        mc_set_schedule_policy(DEFAULT_BIG_CORE);

        /* Restore original performance policy after the default boost time */
        hrtimer_cancel(&timer);
        hrtimer_start(&timer,
                      ns_to_ktime((u64)DEFAULT_SECOS_BOOST_TIME * NSEC_PER_MSEC),
                      HRTIMER_MODE_REL);

error:
        return ret;
}

static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs *
                                  NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

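/*
 * ktime_add_safe() is the detail worth noting above: unlike a plain
 * addition, it saturates at KTIME_MAX on overflow, so an absurdly large
 * user-configured timeout cannot wrap into a deadline in the past. A
 * hypothetical reduction of the pattern, not part of the dm code:
 */
static bool deadline_passed(ktime_t start, u64 timeout_ns)
{
        /* saturating add: returns KTIME_MAX instead of wrapping */
        ktime_t deadline = ktime_add_safe(start, ns_to_ktime(timeout_ns));

        return ktime_after(ktime_get(), deadline);
}
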
static ssize_t k3g_show_delay(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct k3g_data *k3g_data = dev_get_drvdata(dev);
        u64 delay;

        if (DEBUG_FUNC_TRACE & debug_mask)
                printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

        delay = k3g_data->time_to_read * k3g_data->entries;
        delay = ktime_to_ns(ns_to_ktime(delay));

        return sprintf(buf, "%lld\n", delay);
}

static void __start_tbs(void *data)
{
        unsigned long long delay_in_ns = g_tbs_settings.interval * NSEC_PER_USEC;
        struct hrtimer *hrtimer = &__get_cpu_var(hrtimer_for_tbs);

        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = hrtimer_func;
        hrtimer_start(hrtimer, ns_to_ktime(delay_in_ns),
                      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart gpioc_hrtimer_func(struct hrtimer *hrtimer)
{
        struct vib_ctrl_gpio *gpioc =
                container_of(hrtimer, struct vib_ctrl_gpio, hrtimer);
        struct vib_signal *vibs = container_of(gpioc, struct vib_signal, gpioc);
        struct vib_of_signal *of = &vibs->of;

        if (gpioc->stage == GPIO_STAGE_ACTIVE) {
                if (vibs->signal_type == SIGNAL_ENABLE) {
                        gpio_direction_output(of->gpio, !of->active_level);
                        dvib_tprint("g-t %s\n", vibs->name);
                        gpioc->stage = GPIO_STAGE_INACTIVE;
                }
                if (gpioc->inactive_us) {
                        if (vibs->signal_type == SIGNAL_DIRECTION) {
                                gpio_direction_output(of->gpio, !of->active_level);
                                dvib_tprint("g-t %s\n", vibs->name);
                                gpioc->stage = GPIO_STAGE_INACTIVE;
                        }
                        hrtimer_start(&gpioc->hrtimer,
                                      ns_to_ktime((u64)gpioc->inactive_us *
                                                  NSEC_PER_USEC),
                                      HRTIMER_MODE_REL);
                }
        } else {
                if (gpioc->active_us) {
                        dvib_tprint("g+t %s\n", vibs->name);
                        gpio_direction_output(of->gpio, of->active_level);
                        gpioc->stage = GPIO_STAGE_ACTIVE;
                        hrtimer_start(&gpioc->hrtimer,
                                      ns_to_ktime((u64)gpioc->active_us *
                                                  NSEC_PER_USEC),
                                      HRTIMER_MODE_REL);
                }
        }
        return HRTIMER_NORESTART;
}

static void __oprofile_hrtimer_start(void *unused)
{
        struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);

        if (!ctr_running)
                return;

        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = oprofile_hrtimer_notify;
        hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
                      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
{
        oprofile_add_sample(get_irq_regs(), 0);
        hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
#if defined(CONFIG_MP_DEBUG_TOOL_OPROFILE)
#ifdef CONFIG_ADVANCE_OPROFILE
        atomic_inc(&g_aop_pc_sample_count);
#endif /* CONFIG_ADVANCE_OPROFILE */
#endif /* CONFIG_MP_DEBUG_TOOL_OPROFILE */
        return HRTIMER_RESTART;
}

static int dd_init(void)
{
        int ret = 0;
        struct dd_data *data_po;

        printk(KERN_INFO "******************************\n");
        printk(KERN_INFO " sysfs test driver loaded\n");
        printk(KERN_INFO "******************************\n");

        data_po = kzalloc(sizeof(struct dd_data), GFP_KERNEL);
        if (!data_po)
                return -ENOMEM;

        data_po->workqueue_delay = WORKQUEUE_DELAY;

        hrtimer_init(&data_po->polling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        data_po->polling_delay = ns_to_ktime(HRTIMER_DELAY * 100 * NSEC_PER_USEC);
        data_po->polling_timer.function = polling_timer_func;

        g_data_po = data_po;

        data_po->test_class = class_create(THIS_MODULE, "dd_sensors");
        if (IS_ERR(data_po->test_class)) {
                ret = PTR_ERR(data_po->test_class);
                data_po->test_class = NULL;
                goto err_create_class;
        }

        /* device_create - creates a device and registers it with sysfs */
        data_po->test_dev = device_create(data_po->test_class, NULL, 0, "%s", "dd");
        if (unlikely(IS_ERR(data_po->test_dev))) {
                ret = PTR_ERR(data_po->test_dev);
                data_po->test_dev = NULL;
                goto err_create_test_device;
        }

        /* create the sysfs attribute files for the device */
        ret = sysfs_create_group(&data_po->test_dev->kobj, &dd_attribute_group);
        if (ret)
                goto err_create_test_device_file;

        INIT_WORK(&data_po->test1_work, test1_work_func);
        INIT_DELAYED_WORK(&data_po->test2_delayed_work, test2_delayed_work_func);

        return 0;

err_create_test_device_file:
        device_destroy(data_po->test_class, 0);
err_create_test_device:
        class_destroy(data_po->test_class);
err_create_class:
        g_data_po = NULL;
        kfree(data_po);
        return ret;
}

static void vibrator_enable(struct timed_output_dev *dev, int value)
{
#if SUPPORT_TIMED_OUTPUT
        char mode;

        hrtimer_cancel(&vibdata.timer);
        cancel_work_sync(&vibdata.work);
#if SUPPORT_WRITE_PAT
        cancel_work_sync(&vibdata.pat_work);
#endif
        mutex_lock(&vibdata.lock);

        if (value) {
                wake_lock(&vibdata.wklock);
                drv2604_read_reg(STATUS_REG); /* Added by Ken on 20120531 */

                if (!g_bAmpEnabled) {
                        /* Modified by Ken on 20120530 */
                        mode = drv2604_read_reg(MODE_REG) & DRV2604_MODE_MASK;
#if DRV2604_USE_RTP_MODE
                        /* Only change the mode if not already in RTP mode;
                         * RTP input already set at init */
                        if (mode != MODE_REAL_TIME_PLAYBACK) {
                                drv2604_change_mode(MODE_REAL_TIME_PLAYBACK);
                                drv2604_set_rtp_val(vibe_strength);
                                vibrator_is_playing = YES;
                                g_bAmpEnabled = true;
                        }
#endif
#if DRV2604_USE_PWM_MODE
                        /* Only change the mode if not already in PWM mode */
                        if (mode != MODE_PWM_OR_ANALOG_INPUT) {
                                pwm_duty_enable(vibdata.pwm_dev, 0);
                                drv2604_change_mode(MODE_PWM_OR_ANALOG_INPUT);
                                vibrator_is_playing = YES;
                                g_bAmpEnabled = true;
                        }
#endif
                }

                if (value > 0) {
                        if (value > MAX_TIMEOUT)
                                value = MAX_TIMEOUT;
                        hrtimer_start(&vibdata.timer,
                                      ns_to_ktime((u64)value * NSEC_PER_MSEC),
                                      HRTIMER_MODE_REL);
                }
        } else {
                vibrator_off();
        }

        mutex_unlock(&vibdata.lock);
#endif /* SUPPORT_TIMED_OUTPUT */
}

static int register_pcc_channel(int pcc_ss_idx)
{
        struct acpi_pcct_hw_reduced *cppc_ss;
        u64 usecs_lat;

        if (pcc_ss_idx >= 0) {
                pcc_data[pcc_ss_idx]->pcc_channel =
                        pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

                if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
                        pr_err("Failed to find PCC communication channel\n");
                        return -ENODEV;
                }

                /*
                 * The PCC mailbox controller driver should
                 * have parsed the PCCT (global table of all
                 * PCC channels) and stored pointers to the
                 * subspace communication region in con_priv.
                 */
                cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

                if (!cppc_ss) {
                        pr_err("No PCC subspace found for CPPC\n");
                        return -ENODEV;
                }

                /*
                 * cppc_ss->latency is just a Nominal value. In reality
                 * the remote processor could be much slower to reply.
                 * So add an arbitrary amount of wait on top of Nominal.
                 */
                usecs_lat = NUM_RETRIES * cppc_ss->latency;
                pcc_data[pcc_ss_idx]->deadline =
                        ns_to_ktime(usecs_lat * NSEC_PER_USEC);
                pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
                pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
                pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

                pcc_data[pcc_ss_idx]->pcc_comm_addr =
                        acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
                if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
                        pr_err("Failed to ioremap PCC comm region mem\n");
                        return -ENOMEM;
                }

                /* Set flag so that we don't come here for each CPU. */
                pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
        }

        return 0;
}

enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
{
        struct snd_pcsp *chip = container_of(handle, struct snd_pcsp, timer);
        struct snd_pcm_substream *substream;
        int periods_elapsed, pointer_update;
        size_t period_bytes, buffer_bytes;
        unsigned long ns;
        unsigned long flags;

        pointer_update = !chip->thalf;
        ns = pcsp_timer_update(handle);
        if (!ns)
                return HRTIMER_NORESTART;

        /* update the playback position */
        substream = chip->playback_substream;
        if (!substream)
                return HRTIMER_NORESTART;

        period_bytes = snd_pcm_lib_period_bytes(substream);
        buffer_bytes = snd_pcm_lib_buffer_bytes(substream);

        spin_lock_irqsave(&chip->substream_lock, flags);
        chip->playback_ptr += PCSP_INDEX_INC() * chip->fmt_size;
        periods_elapsed = chip->playback_ptr - chip->period_ptr;
        if (periods_elapsed < 0) {
#if PCSP_DEBUG
                printk(KERN_INFO "PCSP: buffer_bytes mod period_bytes != 0 ? "
                       "(%zi %zi %zi)\n",
                       chip->playback_ptr, period_bytes, buffer_bytes);
#endif
                periods_elapsed += buffer_bytes;
        }
        periods_elapsed /= period_bytes;

        /* wrap the pointer _before_ calling snd_pcm_period_elapsed(),
         * or ALSA will BUG on us. */
        chip->playback_ptr %= buffer_bytes;

        if (periods_elapsed) {
                chip->period_ptr += periods_elapsed * period_bytes;
                chip->period_ptr %= buffer_bytes;
        }
        spin_unlock_irqrestore(&chip->substream_lock, flags);

        if (periods_elapsed)
                tasklet_schedule(&pcsp_pcm_tasklet);

        hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));

        return HRTIMER_RESTART;
}

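/*
 * Design note on the re-arm above: pcsp_do_timer() advances with
 * hrtimer_forward(handle, hrtimer_get_expires(handle), ...) rather than
 * hrtimer_forward_now(). Forwarding from the previously programmed expiry
 * keeps successive periods phase-locked to the ideal schedule even when the
 * callback fires late, which matters for audio pacing; forwarding from "now"
 * (as the oprofile handler earlier does) lets the period drift by the
 * callback latency on every tick.
 */
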
static ssize_t al3320_store_als_poll_delay(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
        struct input_dev *input = to_input_dev(dev);
        struct al3320_data *data = input_get_drvdata(input);
        unsigned long val;

        if (strict_strtoul(buf, 10, &val) < 0)
                return -EINVAL;

        data->light_poll_delay = ns_to_ktime(val * 1000);
        return count;
}

void MonitorTimer_Request(MonTimer *monTimer, uint64 when64)
{
        if (when64) {
                ktime_t kt;

                /* when64 is in monitor timer-rate units; the compile-time
                 * assert pins that rate to one tick per nanosecond, so the
                 * value can be handed to ns_to_ktime() directly. */
                kt = ns_to_ktime(when64);
                ASSERT_ON_COMPILE(MVP_TIMER_RATE64 == 1000000000);
                hrtimer_start(&monTimer->timer, kt, HRTIMER_MODE_ABS);
        } else {
                hrtimer_cancel(&monTimer->timer);
        }
}

bool_t register_clocksource(struct device_t **device, struct clocksource_t *cs)
{
        struct device_t *dev;
        irq_flags_t flags;

        if (!cs || !cs->name || !cs->read)
                return FALSE;

        dev = malloc(sizeof(struct device_t));
        if (!dev)
                return FALSE;

        cs->keeper.interval = clocksource_deferment(cs) >> 1;
        cs->keeper.last = clocksource_cycle(cs);
        cs->keeper.nsec = 0;
        seqlock_init(&cs->keeper.lock);
        timer_init(&cs->keeper.timer, clocksource_keeper_timer_function, cs);

        dev->name = strdup(cs->name);
        dev->type = DEVICE_TYPE_CLOCKSOURCE;
        dev->driver = NULL;
        dev->priv = cs;
        dev->kobj = kobj_alloc_directory(dev->name);
        kobj_add_regular(dev->kobj, "mult", clocksource_read_mult, NULL, cs);
        kobj_add_regular(dev->kobj, "shift", clocksource_read_shift, NULL, cs);
        kobj_add_regular(dev->kobj, "period", clocksource_read_period, NULL, cs);
        kobj_add_regular(dev->kobj, "deferment", clocksource_read_deferment, NULL, cs);
        kobj_add_regular(dev->kobj, "cycle", clocksource_read_cycle, NULL, cs);
        kobj_add_regular(dev->kobj, "time", clocksource_read_time, NULL, cs);

        if (!register_device(dev)) {
                kobj_remove_self(dev->kobj);
                free(dev->name);
                free(dev);
                return FALSE;
        }

        if (__clocksource == &__cs_dummy) {
                spin_lock_irqsave(&__clocksource_lock, flags);
                __clocksource = cs;
                spin_unlock_irqrestore(&__clocksource_lock, flags);
        }

        timer_start_now(&cs->keeper.timer, ns_to_ktime(cs->keeper.interval));

        if (device)
                *device = dev;
        return TRUE;
}

static ssize_t store_ami_poll_delay(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct i2c_client *client = to_i2c_client(dev);
        struct ami306_dev_data *pdata = i2c_get_clientdata(client);
        int64_t new_delay;
        int err;

        err = strict_strtoll(buf, 10, &new_delay);
        if (err < 0)
                return err;

        pdata->poll_delay = ns_to_ktime(new_delay);
        return count;
}

static int migrate_init(void)
{
        /* timer restart interval */
        timer_interval_ns = 2000000000;
        /* the process to migrate */
        pid = 1;

        hrtimer_init(&migrate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        migrate_hrtimer.function = hrtimer_def;
        hrtimer_start(&migrate_hrtimer, ns_to_ktime(timer_interval_ns),
                      HRTIMER_MODE_REL);
        return 0;
}

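/*
 * Neither hrtimer_def() nor any teardown appears above. A minimal sketch of
 * both, assuming the callback only needs to act and re-arm itself every
 * timer_interval_ns; the body of the work is elided.
 */
static enum hrtimer_restart hrtimer_def(struct hrtimer *timer)
{
        /* ... act on the target pid here ... */
        hrtimer_forward_now(timer, ns_to_ktime(timer_interval_ns));
        return HRTIMER_RESTART;
}

static void migrate_exit(void)
{
        /* hrtimer_cancel() also waits for a concurrently running callback */
        hrtimer_cancel(&migrate_hrtimer);
}
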
static void set_polling_delay(struct k3g_data *k3g_data, int res)
{
        s64 delay_ns;

        if (DEBUG_FUNC_TRACE & debug_mask)
                printk(KERN_INFO "%s: line %d\n", __func__, __LINE__);

        /* entries still missing from the FIFO, then scaled to nanoseconds */
        delay_ns = k3g_data->entries + 1 - res;
        if (delay_ns < 0)
                delay_ns = 0;

        delay_ns = delay_ns * k3g_data->time_to_read;
        k3g_data->polling_delay = ns_to_ktime(delay_ns);
}

void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
                            struct skb_shared_hwtstamps *hwts,
                            u64 timestamp)
{
        unsigned long flags;
        u64 nsec;

        read_lock_irqsave(&mdev->clock_lock, flags);
        nsec = timecounter_cyc2time(&mdev->clock, timestamp);
        read_unlock_irqrestore(&mdev->clock_lock, flags);

        memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
        hwts->hwtstamp = ns_to_ktime(nsec);
}

/* Sets the hardware timer to go off at a specified time */
void __rk_update_hw_timer_cpu(struct rk_timer *tmr, int cpunum)
{
        cpu_tick_data_t now, val;
        ktime_t delta, nowhr;

        if (tmr == NULL) {
                printk("rk_update_hw_timer: Called with a NULL timer\n");
                return;
        }

        rk_rdtsc(&now);

#ifndef RK_GLOBAL_SCHED
        if (cpunum != smp_processor_id()) {
                printk("rk_update_hw_timer_cpu: ERROR - cpunum:%d, curcpu:%d, type:%d\n",
                       cpunum, smp_processor_id(), tmr->tmr_type);
                return;
        }
#endif
        if (tmr->tmr_expire < now) {
                /* We are setting a timer in the past, do the best that we can */
                val = RK_MIN_TIMER;
        } else {
                val = (tmr->tmr_expire - now);
                if (val < RK_MIN_TIMER)
                        val = RK_MIN_TIMER;
        }

        delta = ns_to_ktime((u64)(val));
        nowhr = ptr_rk_virtual_timer(cpunum)->t.base->get_time();
        ptr_rk_virtual_timer(cpunum)->t.node.expires = nowhr;
        ptr_rk_virtual_timer(cpunum)->t._softexpires = nowhr;
        hrtimer_forward(&(ptr_rk_virtual_timer(cpunum)->t), nowhr, delta);

        /*
         * Note: Do not check hrtimer_callback_running, and use HRTIMER_NORESTART
         * in rk_timer_isr().
         * - When we use HRTIMER_RESTART, __run_hrtimer() checks whether enqueuing
         *   happened while serving the timer handler (rk_timer_isr).
         *   This is why we used to check if the timer handler was running, via
         *   hrtimer_callback_running().
         * - However, it turned out that hrtimer_callback_running() is not reliable
         *   when it is called by other CPUs. This may break the hrtimer constraint.
         * - When we use HRTIMER_NORESTART, we can avoid this issue.
         *   There's no extra overhead for calling hrtimer_start() directly,
         *   because reprogramming will eventually happen only once while the
         *   timer handler is running.
         */
        hrtimer_start(&(ptr_rk_virtual_timer(cpunum)->t),
                      ptr_rk_virtual_timer(cpunum)->t.node.expires,
                      RK_HRTIMER_MODE);
}

static void restart_watchdog_hrtimer(void *info)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
        int ret;

        /*
         * No need to cancel and restart hrtimer if it is currently executing
         * because it will reprogram itself with the new period now.
         * We should never see it unqueued here because we are running per-cpu
         * with interrupts disabled.
         */
        ret = hrtimer_try_to_cancel(hrtimer);
        if (ret == 1)
                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                              HRTIMER_MODE_REL_PINNED);
}

static int timer_online(unsigned long setup)
{
        if (!setup) {
                pr_err("hrtimer_module: cannot start due to a hrtimer value of zero\n");
                return -1;
        } else if (hrtimer_running) {
                pr_notice("hrtimer_module: high res timer already running\n");
                return 0;
        }

        hrtimer_running = 1;

        /* convert the requested rate (calls per second) into a period */
        interval = ns_to_ktime(1000000000UL / setup);

        on_each_cpu(__timer_online, NULL, 1);
        return 0;
}

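/*
 * __timer_online() is referenced above but not shown. A plausible per-CPU
 * body, modeled on __oprofile_hrtimer_start() and __start_tbs() earlier;
 * module_hrtimer and module_hrtimer_notify are assumed names, not taken
 * from the module itself.
 */
static void __timer_online(void *unused)
{
        struct hrtimer *hrtimer = &__get_cpu_var(module_hrtimer);

        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = module_hrtimer_notify;
        hrtimer_start(hrtimer, interval, HRTIMER_MODE_REL_PINNED);
}
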
static int clocksource_keeper_timer_function(struct timer_t *timer, void *data)
{
        struct clocksource_t *cs = (struct clocksource_t *)(data);
        u64_t now, delta, offset;
        irq_flags_t flags;

        write_seqlock_irqsave(&cs->keeper.lock, flags);
        now = clocksource_cycle(cs);
        delta = clocksource_delta(cs, cs->keeper.last, now);
        offset = clocksource_delta2ns(cs, delta);
        cs->keeper.nsec += offset;
        cs->keeper.last = now;
        write_sequnlock_irqrestore(&cs->keeper.lock, flags);

        timer_forward_now(timer, ns_to_ktime(cs->keeper.interval));
        return 1;
}

void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
                            struct skb_shared_hwtstamps *hwts,
                            uint64_t timestamp)
{
        panic("Disabled");
#if 0 // AKAROS_PORT
        unsigned long flags;
        uint64_t nsec;

        read_lock_irqsave(&mdev->clock_lock, flags);
        nsec = timecounter_cyc2time(&mdev->clock, timestamp);
        read_unlock_irqrestore(&mdev->clock_lock, flags);

        memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
        hwts->hwtstamp = ns_to_ktime(nsec);
#endif
}

static void sii9234_adc_work_func(struct work_struct *work)
{
        int acc_adc;
        bool adc_mhl_detected;

        acc_adc = batt_read_adc(CHANNEL_ADC_ACC, NULL);
        pr_info("%s: acc_adc=%d\n", __func__, acc_adc);

        if (acc_adc >= 0 && acc_adc <= 70)
                adc_mhl_detected = true;
        else
                adc_mhl_detected = false;

        if (old_adc_mhl_detected != adc_mhl_detected) {
                pr_info("%s: adc_mhl_detected=%d,%d\n", __func__,
                        adc_mhl_detected, adc_detect_safe_count);
                /* debounce: require three consecutive readings before acting */
                if (++adc_detect_safe_count > 2) {
                        old_adc_mhl_detected = adc_mhl_detected;
                        adc_detect_safe_count = 0;
                        if (adc_mhl_detected)
                                MHL_On(true);
                        else
                                MHL_On(false);
                }
        } else {
                adc_detect_safe_count = 0;
        }

        if (!old_adc_mhl_detected)
                hrtimer_start(&sii9234_adc_timer,
                              ns_to_ktime(250000000), /* 250 ms */
                              HRTIMER_MODE_REL);
}

static int gator_hrtimer_init(int interval, void (*func)(void))
{
        int cpu;

        callback = func;

        for_each_present_cpu(cpu)
                per_cpu(hrtimer_is_active, cpu) = 0;

        /* calculate profiling interval */
        if (interval > 0)
                profiling_interval = ns_to_ktime(1000000000UL / interval);
        else
                profiling_interval.tv64 = 0;

        return 0;
}

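/*
 * The .tv64 assignment above dates this to kernels before 4.10, where
 * ktime_t was a union wrapping an s64; Linux 4.10 flattened ktime_t to a
 * bare s64, so the member no longer exists. A hypothetical guard if the
 * zero case must build on both:
 */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
        profiling_interval = 0;         /* ktime_t is a plain s64 */
#else
        profiling_interval.tv64 = 0;    /* pre-4.10 union member */
#endif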