static void get_time_stamp(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}
static inline void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	time_stamp->buf_time.tv_sec = ts.tv_sec;
	time_stamp->buf_time.tv_usec = ts.tv_nsec / 1000;
	do_gettimeofday(&(time_stamp->event_time));
}
void v4l2_get_timestamp(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
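/*
 * Illustrative sketch, not from the original sources: one common way a caller
 * might use ktime_get() to measure an elapsed interval on the monotonic
 * clock. The function name measure_work_ns() and the do_work() callee are
 * hypothetical placeholders; ktime_sub() and ktime_to_ns() are the standard
 * kernel helpers for ktime_t arithmetic.
 */
static u64 measure_work_ns(void)
{
	ktime_t start, end;

	start = ktime_get();
	do_work();		/* hypothetical workload being timed */
	end = ktime_get();

	/* Monotonic clock, so end >= start; the difference is in nanoseconds. */
	return ktime_to_ns(ktime_sub(end, start));
}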
static void solo_set_time(struct solo6010_dev *solo_dev)
{
	struct timespec ts;

	ktime_get_ts(&ts);

	solo_reg_write(solo_dev, SOLO_TIMER_SEC, ts.tv_sec);
	solo_reg_write(solo_dev, SOLO_TIMER_USEC, ts.tv_nsec / NSEC_PER_USEC);
}
int ist30xx_get_info(struct ist30xx_data *data)
{
	int ret;
	u32 calib_msg;
	int retry = 0;

	ist30xx_tsp_info.finger_num = IST30XX_MAX_MT_FINGERS;

	mutex_lock(&ist30xx_mutex);
	ist30xx_disable_irq(data);

RETRY:
	ret = ist30xx_write_cmd(data->client, CMD_RUN_DEVICE, 0);
	msleep(50);

	ret = ist30xx_get_ver_info(data);
	if (ret != 0) {
		if (retry++ < 10) {
			tsp_debug("ist30xx_get_info retry : %d \n", retry);
			ist30xx_ts_reset();
			goto RETRY;
		}
	}

	ret = ist30xx_tsp_update_info();
	ret = ist30xx_tkey_update_info();

	ist30xx_print_info();

	ret = ist30xx_read_cmd(ts_data->client, CMD_GET_CALIB_RESULT, &calib_msg);
	if (ret == 0) {
		tsp_info("calib status: 0x%08x\n", calib_msg);
		if ((calib_msg & CALIB_MSG_MASK) != CALIB_MSG_VALID ||
		    CALIB_TO_STATUS(calib_msg) > 0) {
			ist30xx_calibrate(IST30XX_FW_UPDATE_RETRY);
			ist30xx_cmd_run_device(data->client);
		}
	}

	ist30xx_start(ts_data);

#if IST30XX_EVENT_MODE
	ktime_get_ts(&t_event);
#endif

	data->status.calib = 0;
	ist30xx_enable_irq(data);
	mutex_unlock(&ist30xx_mutex);

	return ret;
}
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
{
	int min = 0, max = 0, fail_count = 0;
	uint64_t sum = 0;
	uint64_t avg;
	int i;
	/* Allow udelay to be up to 0.5% fast */
	int allowed_error_ns = usecs * 5;

	for (i = 0; i < iters; ++i) {
		struct timespec ts1, ts2;
		int time_passed;

		ktime_get_ts(&ts1);
		udelay(usecs);
		ktime_get_ts(&ts2);
		time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);

		if (i == 0 || time_passed < min)
			min = time_passed;
		if (i == 0 || time_passed > max)
			max = time_passed;
		if ((time_passed + allowed_error_ns) / 1000 < usecs)
			++fail_count;
		WARN_ON(time_passed < 0);
		sum += time_passed;
	}

	avg = sum;
	do_div(avg, iters);
	seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d",
			usecs, iters, usecs * 1000,
			(usecs * 1000) - allowed_error_ns, min, avg, max);
	if (fail_count)
		seq_printf(s, " FAIL=%d", fail_count);
	seq_puts(s, "\n");

	return 0;
}
/*
 * isp_video_buffer_next - Complete the current buffer and return the next one
 * @video: ISP video object
 * @error: Whether an error occurred during capture
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * The buffer state is set to VIDEOBUF_DONE if no error occurred (@error is 0)
 * or VIDEOBUF_ERROR otherwise (@error is non-zero).
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue
 * is empty.
 */
struct isp_buffer *isp_video_buffer_next(struct isp_video *video,
					 unsigned int error)
{
	struct isp_video_queue *queue = video->queue;
	enum isp_pipeline_state state;
	struct isp_video_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	spin_lock_irqsave(&queue->irqlock, flags);
	BUG_ON(list_empty(&video->dmaqueue));
	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	ktime_get_ts(&ts);
	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	buf->vbuf.sequence = atomic_inc_return(&video->sequence);
	buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;

	wake_up(&buf->wait);

	if (list_empty(&video->dmaqueue)) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&video->pipe->lock, flags);
		video->pipe->state &= ~state;
		spin_unlock_irqrestore(&video->pipe->lock, flags);
		return NULL;
	}

	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    video->pipe->input != NULL) {
		spin_lock_irqsave(&video->pipe->lock, flags);
		video->pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&video->pipe->lock, flags);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	buf->state = ISP_BUF_STATE_ACTIVE;
	return to_isp_buffer(buf);
}
static void cw_update_time_member_capacity_change(struct cw_battery *cw_bat)
{
	struct timespec ts;
	int new_run_time;
	int new_sleep_time;

	ktime_get_ts(&ts);
	new_run_time = ts.tv_sec;

	get_monotonic_boottime(&ts);
	new_sleep_time = ts.tv_sec - new_run_time;

	cw_bat->run_time_capacity_change = new_run_time;
	cw_bat->sleep_time_capacity_change = new_sleep_time;
}
static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
				struct vpbe_layer *layer)
{
	struct timespec timevalue;

	if (layer->cur_frm == layer->next_frm)
		return;

	ktime_get_ts(&timevalue);
	layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
	layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
	layer->cur_frm->state = VIDEOBUF_DONE;
	wake_up_interruptible(&layer->cur_frm->done);
	layer->cur_frm = layer->next_frm;
}
unsigned long printk_clock(void)
{
	if (clk_source_inited) {
		struct timespec ts;

#if 0
		getnstimeofday(&ts);
		getboottime(&ts);
#else
		ktime_get_ts(&ts);
#endif
		return ts.tv_sec + clk_source_suspend_second;
	}

	return 0;
}
int ist30xx_tracking(u32 status)
{
	u32 ms;

	if (!tracking_initialize)
		ist30xx_tracking_init();

	ktime_get_ts(&t_track);
	ms = t_track.tv_sec * 1000 + t_track.tv_nsec / 1000000;

	ist30xx_put_track_ms(ms);
	ist30xx_put_track(&status, 1);

	return 0;
}
/* Added by isuma: for vsync reporting */
static long long s3cfb_get_system_time(void)
{
	long long vsync;
#if 0	/* much more precise */
	struct timeval tv;

	do_gettimeofday(&tv);
	vsync = (long long)(tv.tv_sec) * 1000000000LL + tv.tv_usec * 1000;
	/* printk("do_gettimeofday sec:%d usec:%d\n", tv.tv_sec, tv.tv_usec); */
#else	/*
	 * do_gettimeofday() is more precise than current_kernel_time() and
	 * ktime_get_ts(), but it does not match the clock used by userspace.
	 */
	struct timespec ts;

	/* ts = current_kernel_time(); */
	ktime_get_ts(&ts);
	vsync = (long long)(ts.tv_sec) * 1000000000LL + ts.tv_nsec;
	/* printk("current_kernel_time sec:%d nsec:%d\n", ts.tv_sec, ts.tv_nsec); */
#endif
	return vsync;
}
void ist30xx_start(struct ist30xx_data *data)
{
#if IST30XX_DETECT_TA
	if (ist30xx_ta_status > -1) {
		ist30xx_write_cmd(data->client, CMD_SET_TA_MODE,
				  ist30xx_ta_status);
		tsp_info("%s(), ta_mode : %d\n", __func__, ist30xx_ta_status);
	}
#endif
	ist30xx_cmd_start_scan(data->client);

#if IST30XX_EVENT_MODE
	if ((data->status.update != 1) && (data->status.calib != 1))
		ktime_get_ts(&t_event);
#endif
}
static int msm_gesture_handle_event(struct v4l2_subdev *sd,
	struct msm_gesture_ctrl *p_gesture_ctrl, void *arg)
{
	int rc = 0;
	struct v4l2_event *evt = (struct v4l2_event *)arg;
	struct msm_ges_evt *p_ges_evt = NULL;

	D("%s: Received gesture evt 0x%x ", __func__, evt->type);
	p_gesture_ctrl->event.evt_len = 0;
	p_gesture_ctrl->event.evt_data = NULL;

	if (0 != evt->u.data[0]) {
		p_ges_evt = (struct msm_ges_evt *)evt->u.data;
		D("%s: event data %p len %d", __func__,
			p_ges_evt->evt_data, p_ges_evt->evt_len);

		if (p_ges_evt->evt_len > 0) {
			p_gesture_ctrl->event.evt_data =
				kzalloc(p_ges_evt->evt_len, GFP_KERNEL);
			if (NULL == p_gesture_ctrl->event.evt_data) {
				pr_err("%s: cannot allocate event", __func__);
				rc = -ENOMEM;
			} else {
				if (copy_from_user(
					(void *)p_gesture_ctrl->event.evt_data,
					(void __user *)p_ges_evt->evt_data,
					p_ges_evt->evt_len)) {
					pr_err("%s: copy_from_user failed",
						__func__);
					rc = -EFAULT;
				} else {
					D("%s: copied the event", __func__);
					p_gesture_ctrl->event.evt_len =
						p_ges_evt->evt_len;
				}
			}
		}
	}
	if (rc == 0) {
		ktime_get_ts(&evt->timestamp);
		v4l2_event_queue(&sd->devnode, evt);
	}
	D("%s: exit rc %d ", __func__, rc);
	return rc;
}
int ts_algo_t1(struct ts_device_data *dev_data, struct ts_fingers *in_info,
	       struct ts_fingers *out_info)
{
	int index;
	int id;

	for (index = 0, id = 0; index < TS_MAX_FINGER; index++, id++) {
		if (in_info->cur_finger_number == 0) {
			if (index < FILTER_GLOVE_NUMBER) {
				touch_pos_x[index] = -1;
				touch_pos_y[index] = -1;
				if (touch_state == FINGER_STATE) {
					/* this is a finger release */
					ktime_get_ts(&pre_finger_time[index]);
				}
			}
			out_info->fingers[0].status = TS_FINGER_RELEASE;
			if (id >= 1)
				out_info->fingers[id].status = 0;
		} else {
			if ((in_info->fingers[index].x != 0) ||
			    (in_info->fingers[index].y != 0)) {
				if (index < FILTER_GLOVE_NUMBER) {
					if (filter_illegal_glove(index, in_info) == 0) {
						out_info->fingers[id].status = 0;
					} else {
						out_info->fingers[id].x =
							in_info->fingers[index].x;
						out_info->fingers[id].y =
							in_info->fingers[index].y;
						out_info->fingers[id].pressure =
							in_info->fingers[index].pressure;
						out_info->fingers[id].status =
							TS_FINGER_PRESS;
					}
				} else {
					out_info->fingers[id].x =
						in_info->fingers[index].x;
					out_info->fingers[id].y =
						in_info->fingers[index].y;
					out_info->fingers[id].pressure =
						in_info->fingers[index].pressure;
					out_info->fingers[id].status =
						TS_FINGER_PRESS;
				}
			} else {
				out_info->fingers[id].status = 0;
			}
		}
	}

	out_info->gesture_wakeup_value = in_info->gesture_wakeup_value;
	out_info->special_button_key = in_info->special_button_key;
	out_info->special_button_flag = in_info->special_button_flag;

	return NO_ERR;
}
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	struct timespec ts;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	ctx->stats.tstamp = timespec_to_ns(&ts);

	atomic_inc(&nr_spu_contexts);
	goto out;

out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
static void mmpfb_vsync_cb(void *data)
{
	struct timespec vsync_time;
	struct mmpfb_info *fbi = (struct mmpfb_info *)data;

	/* in vsync callback */
	mmpfb_vcnt_clean(fbi);

	/* Get time stamp of vsync */
	ktime_get_ts(&vsync_time);
	fbi->vsync.ts_nano = ((uint64_t)vsync_time.tv_sec) * 1000 * 1000 * 1000
		+ ((uint64_t)vsync_time.tv_nsec);

	if (atomic_read(&fbi->op_count)) {
		queue_work(fbi->vsync.wq, &fbi->vsync.work);
		queue_work(fbi->vsync.wq, &fbi->vsync.fence_work);
	}
}
static long estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */
	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
static int isp_stat_buf_queue(struct ispstat *stat)
{
	if (!stat->active_buf)
		return STAT_NO_BUF;

	ktime_get_ts(&stat->active_buf->ts);

	stat->active_buf->buf_size = stat->buf_size;
	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
			stat->subdev.name);
		return STAT_NO_BUF;
	}
	stat->active_buf->config_counter = stat->config_counter;
	stat->active_buf->frame_number = stat->frame_number;
	stat->active_buf->empty = 0;
	stat->active_buf = NULL;

	return STAT_BUF_DONE;
}
static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
				       size_t count, loff_t *ppos)
{
	struct sta_info *sta = file->private_data;
	struct timespec uptime;
	struct tm result;
	long connected_time_secs;
	char buf[100];
	int res;

	ktime_get_ts(&uptime);
	connected_time_secs = uptime.tv_sec - sta->last_connected;
	time_to_tm(connected_time_secs, 0, &result);
	result.tm_year -= 70;
	result.tm_mday -= 1;
	res = scnprintf(buf, sizeof(buf),
		"years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n",
		result.tm_year, result.tm_mon, result.tm_mday,
		result.tm_hour, result.tm_min, result.tm_sec);
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
int stml0xx_ms_data_buffer_write(struct stml0xx_data *ps_stml0xx,
				 unsigned char type, unsigned char *data,
				 int size)
{
	int new_head;
	struct stml0xx_moto_sensor_data *buffer;
	struct timespec ts;
	static bool error_reported;

	new_head = (ps_stml0xx->stml0xx_ms_data_buffer_head + 1)
		& STML0XX_MS_DATA_QUEUE_MASK;

	if (new_head == ps_stml0xx->stml0xx_ms_data_buffer_tail) {
		if (!error_reported) {
			dev_err(&stml0xx_misc_data->spi->dev,
				"ms data buffer full");
			error_reported = true;
		}
		wake_up(&ps_stml0xx->stml0xx_ms_data_wq);
		return 0;
	}
	buffer = &(ps_stml0xx->stml0xx_ms_data_buffer[new_head]);
	buffer->type = type;
	if (data != NULL && size > 0) {
		if (size > sizeof(buffer->data)) {
			dev_err(&stml0xx_misc_data->spi->dev,
				"size %d exceeds ms buffer", size);
			return 0;
		}
		memcpy(buffer->data, data, size);
	}
	buffer->size = size;

	ktime_get_ts(&ts);
	buffer->timestamp = ts.tv_sec * 1000000000LL + ts.tv_nsec;

	ps_stml0xx->stml0xx_ms_data_buffer_head = new_head;
	wake_up(&ps_stml0xx->stml0xx_ms_data_wq);

	error_reported = false;
	return 1;
}
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct timespec ts;
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
	} else {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	ev->timestamp_ns = timespec_to_ns(&ts);
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = cred->uid;
		ev->event_data.id.e.euid = cred->euid;
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = cred->gid;
		ev->event_data.id.e.egid = cred->egid;
	} else {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	proc_get_exe(task, ev->event_data.id.exe);
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
static int alarm_get_time(enum android_alarm_type alarm_type,
			  struct timespec *ts)
{
	int rv = 0;

	switch (alarm_type) {
	case ANDROID_ALARM_RTC_WAKEUP:
	case ANDROID_ALARM_RTC:
		getnstimeofday(ts);
		break;
	case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
	case ANDROID_ALARM_ELAPSED_REALTIME:
		get_monotonic_boottime(ts);
		break;
	case ANDROID_ALARM_SYSTEMTIME:
		ktime_get_ts(ts);
		break;
	default:
		rv = -EINVAL;
	}
	return rv;
}
static void solo_timer_sync(struct solo6010_dev *solo_dev)
{
	u32 sec, usec;
	struct timespec ts;
	long diff;

	if (solo_dev->type != SOLO_DEV_6110)
		return;

	if (++solo_dev->time_sync < 60)
		return;

	solo_dev->time_sync = 0;

	sec = solo_reg_read(solo_dev, SOLO_TIMER_SEC);
	usec = solo_reg_read(solo_dev, SOLO_TIMER_USEC);

	ktime_get_ts(&ts);

	diff = (long)ts.tv_sec - (long)sec;
	diff = (diff * 1000000)
		+ ((long)(ts.tv_nsec / NSEC_PER_USEC) - (long)usec);

	if (diff > 1000 || diff < -1000) {
		solo_set_time(solo_dev);
	} else if (diff) {
		long usec_lsb = solo_dev->usec_lsb;

		usec_lsb -= diff / 4;
		if (usec_lsb < 0)
			usec_lsb = 0;
		else if (usec_lsb > 255)
			usec_lsb = 255;

		solo_dev->usec_lsb = usec_lsb;
		solo_reg_write(solo_dev, SOLO_TIMER_USEC_LSB,
			       solo_dev->usec_lsb);
	}
}
/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	msg->seq = rcvd_seq;
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exec_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXEC;
	ev->event_data.exec.process_pid = task->pid;
	ev->event_data.exec.process_tgid = task->tgid;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
static s64 get_time_ns(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ns(&ts);
}