Example #1
static long kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void __user *arg)
{
	struct kfd_ioctl_get_clock_counters_args args;
	struct kfd_dev *dev;
	struct timespec time;

	if (copy_from_user(&args, arg, sizeof(args)))
		return -EFAULT;

	dev = kfd_device_by_id(args.gpu_id);
	if (dev == NULL)
		return -EINVAL;

	/* Reading GPU clock counter from KGD */
	args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);

	/* No access to rdtsc. Using raw monotonic time */
	getrawmonotonic(&time);
	args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);

	get_monotonic_boottime(&time);
	args.system_clock_counter = (uint64_t)timespec_to_ns(&time);

	/* Since the counter is in nano-seconds we use 1GHz frequency */
	args.system_clock_freq = 1000000000;

	if (copy_to_user(arg, &args, sizeof(args)))
		return -EFAULT;

	return 0;
}
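For context (not part of the driver code above): timespec_to_ns() simply scales the seconds field by NSEC_PER_SEC and adds the nanoseconds, which is why the example can report the CPU counter frequency as a flat 1 GHz. A minimal sketch of an equivalent helper (illustrative only; the real helper is provided by the kernel's time headers):

/* Illustration: a nanosecond timestamp behaves like a 1 GHz counter. */
static inline s64 example_timespec_to_ns(const struct timespec *ts)
{
	return ((s64)ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}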
Example #2
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	s64 tmp;
	unsigned long t1;
	unsigned long long t2, t3;
	unsigned long flags;
	struct timespec ts;

	/* Though tsk->delays accessed later, early exit avoids
	 * unnecessary returning of other data
	 */
	if (!tsk->delays)
		goto done;

	tmp = (s64)d->cpu_run_real_total;
	cputime_to_timespec(tsk->utime + tsk->stime, &ts);
	tmp += timespec_to_ns(&ts);
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	tmp = (s64)d->cpu_scaled_run_real_total;
	cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
	tmp += timespec_to_ns(&ts);
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ?	0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	spin_unlock_irqrestore(&tsk->delays->lock, flags);

done:
	return 0;
}
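A note on the pattern used throughout this example: each *_total field is accumulated through a signed temporary and reset to 0 when the addition wraps, while the matching *_count keeps growing, which is how a reader of taskstats can tell that a total overflowed (see the comment above the spinlock section). A hedged sketch of that saturate-to-zero idiom, with a made-up helper name; it relies on the kernel being built with wrap-around semantics for signed addition (-fno-strict-overflow):

/* Hypothetical helper mirroring the overflow handling above. */
static u64 add_or_zero_on_overflow(u64 total, u64 delta)
{
	s64 tmp = (s64)total + (s64)delta;

	/* A wrapped sum compares smaller than the old total; report 0 instead. */
	return (tmp < (s64)total) ? 0 : (u64)tmp;
}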
Example #3
File: edf-wm.c  Project: Aand1/ROSCH
static void migration_thread(void *__data)
{
	int cpu = (long) __data;
	edf_wm_task_t *et;
	struct timespec ts;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		spin_lock_irq(&kthread[cpu].lock);
		if (list_empty(&kthread[cpu].list)) {
			spin_unlock_irq(&kthread[cpu].lock);
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}

		/* get a task in the list by fifo. */
		et = list_first_entry(&kthread[cpu].list,
				      edf_wm_task_t,
				      migration_list);
		list_del_init(&et->migration_list);
		spin_unlock_irq(&kthread[cpu].lock);

		/* account runtime. */
		jiffies_to_timespec(et->runtime[cpu], &ts);
		et->rt->task->dl.sched_runtime = timespec_to_ns(&ts);

		/* trace precise deadlines. */
		et->rt->deadline_time += et->deadline;
		et->rt->task->dl.sched_deadline = et->sched_split_deadline;
		et->rt->task->dl.deadline = et->next_release;
		et->next_release += et->sched_split_deadline;

		/* now let's migrate the task! */
		et->rt->task->dl.flags |= DL_NEW;
		migrate_task(et->rt, cpu);
		wake_up_process(et->rt->task);

		/* when the budget is exhausted, the deadline should be advanced
		   by et->sched_deadline, not by et->sched_split_deadline. */
		et->rt->task->dl.sched_deadline = et->sched_deadline;

		/* account runtime. */
		jiffies_to_timespec(et->runtime[cpu], &ts);
		et->rt->task->dl.runtime = timespec_to_ns(&ts);

		/* activate the timer for the next migration of this task. */
		if (et->last_cpu != cpu) {
			et->rt->task->dl.flags &= ~SCHED_EXHAUSTIVE;
			start_window_timer(et);
		}
		else {
			et->rt->task->dl.flags |= SCHED_EXHAUSTIVE;
		}
	}
}
Example #4
u8 XTIER_inject_reinject(struct kvm_vcpu *vcpu)
{
	struct timespec now;
	u32 rflags = 0;

	if(!_XTIER_inject_current_injection.code_len || !_XTIER_inject_current_injection.code)
		return 0;

	PRINT_DEBUG_FULL("Checking for reinjection...\n");

	// Do not reinject in case interrupts are disabled, pending or the fpu is active
	XTIER_read_vmcs(GUEST_RFLAGS, &rflags);

	if(!(rflags & (1UL << 9)) || vcpu->fpu_active)
	{
		return 0;
	}

	// Check for a request to reinject the module.
	if(_XTIER_inject.reinject == 1)
	{
		// Inject on the next entry
		PRINT_DEBUG("Will reinject on the next entry!\n");
		_XTIER_inject.reinject = 2;
	}
	else if(_XTIER_inject.reinject == 2)
	{
		// Reset and reinject
		PRINT_DEBUG("Will reinject now!\n");
		_XTIER_inject.reinject = 0;
		return 1;
	}

	// Check for auto_injection
	if(!(_XTIER.mode & XTIER_CODE_INJECTION) &&
			!_XTIER_inject_current_injection.event_based &&
		    _XTIER_inject_current_injection.auto_inject > 0)
	{
		return 1;
	}

	// Check for time-based injection
	getnstimeofday(&now);

	if(!(_XTIER.mode & XTIER_CODE_INJECTION) &&
	    !_XTIER_inject_current_injection.event_based &&
		_XTIER_inject_current_injection.time_inject &&
		_XTIER_inject_current_injection.time_inject <=
		((timespec_to_ns(&now) - timespec_to_ns(&_endtime)) / NSEC_PER_SEC))
	{
		return 1;
	}

	return 0;
}
Example #5
File: pm_cpuidle.c  Project: mozyg/kernel
static void clock_bail_trace(const char *s, u32 cond, int id)
{
	struct timespec ts_now;
	s64 now;
	static int last_id = 0;
	static s64 last_state_change = 0;

	getnstimeofday(&ts_now);
	now = timespec_to_ns(&ts_now);

	/* Update the total and sleep time for the current entry. Doing this
	 * here ensures that we have a consistent entry even if there was no
	 * state change yet.
	 */
	if (last_state_change != 0) {
		cbt_list[cbt_idx].total_residency = (now - last_state_change);
		cbt_list[cbt_idx].sleep_residency =
					cbt_list[cbt_idx].total_residency -
					cbt_list[cbt_idx].wake_residency;
	}

	if (id == last_id) {
		/* We woke up and came back here, but the bail condition has
		 * not changed. We add the time we were awake during this
		 * condition.
		 */
		s64 last_wake_up = timespec_to_ns(&ts_last_wake_up);

		cbt_list[cbt_idx].wake_residency += (now - last_wake_up);

		/* Count wake-up event.
		 */
		cbt_list[cbt_idx].num_wakeups++;
		return;
	}

	/* Record state change time.
	 */
	last_state_change = now;
	last_id = id;

	/* Switch to next entry, set first_entry timestamp and clear out the
	 * rest of the entry.
	 */
	cbt_idx = (cbt_idx + 1) % CLOCK_BAIL_TRACE_MAX;

	cbt_list[cbt_idx].first_entry = now;
	cbt_list[cbt_idx].desc        = s;
	cbt_list[cbt_idx].cond        = cond;
	cbt_list[cbt_idx].total_residency = 0;
	cbt_list[cbt_idx].sleep_residency = 0;
	cbt_list[cbt_idx].wake_residency  = 0;
	cbt_list[cbt_idx].num_wakeups     = 0;
}
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	struct timespec ts_entry, ts_exit;
	u64 elapsed_msecs64;
	u32 elapsed_msecs32;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX) {
		/*
		 * We want to prevent the system from frequent periodic
		 * wake-ups when the sleeping time is less than or equal to a
		 * certain interval. This is done to save power in certain
		 * cases; GPS tracking is one example, but not the only one.
		 */
		getnstimeofday(&ts_entry);
		error = pm_suspend(state);
		getnstimeofday(&ts_exit);

		elapsed_msecs64 = timespec_to_ns(&ts_exit) -
			timespec_to_ns(&ts_entry);
		do_div(elapsed_msecs64, NSEC_PER_MSEC);
		elapsed_msecs32 = elapsed_msecs64;

		if (elapsed_msecs32 <= SBO_SLEEP_MSEC) {
			if (suspend_short_count == SBO_CNT)
				suspend_backoff();
			else
				suspend_short_count++;
		} else {
			suspend_short_count = 0;
		}
	} else if (state == PM_SUSPEND_MAX)
		error = hibernate();
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}
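A side note on the elapsed-time math in state_store() above: do_div() is the kernel's 64-bit-by-32-bit division helper, so it divides its first argument in place and returns the remainder, which is why elapsed_msecs64 is reused as the quotient before being narrowed to 32 bits. A small sketch with a hypothetical helper name:

/* Hypothetical helper showing the same ns -> ms conversion in isolation. */
static u32 example_ns_to_msecs(u64 ns)
{
	do_div(ns, NSEC_PER_MSEC);	/* quotient left in 'ns'; remainder returned */

	return (u32)ns;
}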
Example #7
window_stats *census_window_stats_create(int nintervals,
                                         const gpr_timespec intervals[],
                                         int granularity,
                                         const cws_stat_info *stat_info) {
  window_stats *ret;
  int i;
  /* validate inputs */
  GPR_ASSERT(nintervals > 0 && granularity > 2 && intervals != NULL &&
             stat_info != NULL);
  for (i = 0; i < nintervals; i++) {
    int64_t ns = timespec_to_ns(intervals[i]);
    GPR_ASSERT(intervals[i].tv_sec >= 0 && intervals[i].tv_nsec >= 0 &&
               intervals[i].tv_nsec < GPR_NS_PER_SEC && ns >= 100 &&
               granularity * 10 <= ns);
  }
  /* Allocate and initialize relevant data structures */
  ret = (window_stats *)gpr_malloc(sizeof(window_stats));
  ret->nintervals = nintervals;
  ret->nbuckets = granularity + 1;
  ret->stat_info = *stat_info;
  ret->interval_stats =
      (cws_interval_stats *)gpr_malloc(nintervals * sizeof(cws_interval_stats));
  for (i = 0; i < nintervals; i++) {
    int64_t size_ns = timespec_to_ns(intervals[i]);
    cws_interval_stats *is = ret->interval_stats + i;
    cws_bucket *buckets = is->buckets =
        (cws_bucket *)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
    int b;
    for (b = 0; b < ret->nbuckets; b++) {
      buckets[b].statistic = cws_create_statistic(stat_info);
      buckets[b].count = 0;
    }
    is->bottom_bucket = 0;
    is->bottom = 0;
    is->width = size_ns / granularity;
    /* Check for possible overflow issues, and maximize interval size if the
       user requested something large enough. */
    if ((GPR_INT64_MAX - is->width) > size_ns) {
      is->top = size_ns + is->width;
    } else {
      is->top = GPR_INT64_MAX;
      is->width = GPR_INT64_MAX / (granularity + 1);
    }
    /* If size doesn't divide evenly, we can have a width slightly too small;
       better to have it slightly large. */
    if ((size_ns - (granularity + 1) * is->width) > 0) {
      is->width += 1;
    }
  }
  ret->newest_time = 0;
  return ret;
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_PTRACE;
	ev->event_data.ptrace.process_pid  = task->pid;
	ev->event_data.ptrace.process_tgid = task->tgid;
	if (ptrace_id == PTRACE_ATTACH) {
		ev->event_data.ptrace.tracer_pid  = current->pid;
		ev->event_data.ptrace.tracer_tgid = current->tgid;
	} else if (ptrace_id == PTRACE_DETACH) {
		ev->event_data.ptrace.tracer_pid  = 0;
		ev->event_data.ptrace.tracer_tgid = 0;
	} else
		return;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg*)buffer;
	ev = (struct proc_event*)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
Example #10
static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn,
			struct dentry *wh)
{
	char *n = NULL;
	int l = 0;

	if (!inode || IS_ERR(inode)) {
		dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
		return -1;
	}

	/* the type of i_blocks depends upon CONFIG_LBDAF */
	BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
		     && sizeof(inode->i_blocks) != sizeof(u64));
	if (wh) {
		n = (void *)wh->d_name.name;
		l = wh->d_name.len;
	}

	dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
	     " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n",
	     bindex, inode,
	     inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
	     atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
	     i_size_read(inode), (unsigned long long)inode->i_blocks,
	     hn, (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff,
	     inode->i_mapping ? inode->i_mapping->nrpages : 0,
	     inode->i_state, inode->i_flags, inode->i_version,
	     inode->i_generation,
	     l ? ", wh " : "", l, n);
	return 0;
}
Example #11
File: hrt.c  Project: FrozenCow/FIRE-ICE
static inline u64 get_posix_clock_monotonic_time(void)
{
	struct timespec ts;

	do_posix_clock_monotonic_gettime(&ts);
	return timespec_to_ns(&ts);
}
Example #12
void census_window_stats_add(window_stats *wstats, const gpr_timespec when,
                             const void *stat_value) {
  int i;
  int64_t when_ns = timespec_to_ns(when);
  GPR_ASSERT(wstats->interval_stats != NULL);
  for (i = 0; i < wstats->nintervals; i++) {
    cws_interval_stats *is = wstats->interval_stats + i;
    cws_bucket *bucket;
    if (when_ns < is->bottom) { /* Below smallest time in interval: drop */
      continue;
    }
    if (when_ns >= is->top) { /* above limit: shift buckets */
      cws_shift_buckets(wstats, is, when_ns);
    }
    /* Add the stat. */
    GPR_ASSERT(is->bottom <= when_ns && when_ns < is->top);
    bucket = is->buckets +
             BUCKET_IDX(is, (when_ns - is->bottom) / is->width, wstats);
    bucket->count++;
    wstats->stat_info.stat_add(bucket->statistic, stat_value);
  }
  if (when_ns > wstats->newest_time) {
    wstats->newest_time = when_ns;
  }
}
Example #13
void proc_exec_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg*)buffer;
	ev = (struct proc_event*)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXEC;
	ev->event_data.exec.process_pid = task->pid;
	ev->event_data.exec.process_tgid = task->tgid;

	rcu_read_lock();
	cred = __task_cred(task);
	ev->event_data.exec.process_euid = cred->euid;
	ev->event_data.exec.process_egid = cred->egid;
	rcu_read_unlock();

	proc_get_exe(task, ev->event_data.exec.exe);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
Example #14
static int gator_events_mmapped_read(int **buffer, bool sched_switch)
{
    int i;
    int len = 0;
    int delta_in_us;
    struct timespec ts;
    s64 time;

    /* System wide counters - read from one core only */
    if (!on_primary_core() || !mmapped_global_enabled)
        return 0;

    getnstimeofday(&ts);
    time = timespec_to_ns(&ts);
    delta_in_us = (int)(time - prev_time) / 1000;
    prev_time = time;

    for (i = 0; i < MMAPPED_COUNTERS_NUM; i++) {
        if (mmapped_counters[i].enabled) {
            mmapped_buffer[len++] = mmapped_counters[i].key;
            mmapped_buffer[len++] =
                mmapped_simulate(i, delta_in_us);
        }
    }

    if (buffer)
        *buffer = mmapped_buffer;

    return len;
}
Example #15
void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;
	struct task_struct *parent;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg*)buffer;
	ev = (struct proc_event*)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_FORK;
	rcu_read_lock();
	parent = rcu_dereference(task->real_parent);
	ev->event_data.fork.parent_pid = parent->pid;
	ev->event_data.fork.parent_tgid = parent->tgid;
	rcu_read_unlock();
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	/*  If cn_netlink_send() failed, the data is not sent */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
Example #16
/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	msg->seq = rcvd_seq;
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	ev->timestamp_ns = timespec_to_ns(&ts);
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
Example #17
void proc_coredump_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	ev->timestamp_ns = timespec_to_ns(&ts);
	ev->what = PROC_EVENT_COREDUMP;
	ev->event_data.coredump.process_pid = task->pid;
	ev->event_data.coredump.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
Example #18
static enum hrtimer_restart hrtimer_notify(struct hrtimer *hrtimer)
{
	struct timespec ts;
	s64 timestamp;
	s64 last_hrtimer;
	int cpu;

	cpu = smp_processor_id();
	hrtimer_forward(hrtimer, per_cpu(hrtimer_expire, cpu), interval);
	per_cpu(hrtimer_expire, cpu) = ktime_add(per_cpu(hrtimer_expire, cpu), interval);

	getnstimeofday(&ts);
	timestamp = timespec_to_ns(&ts);
	last_hrtimer = per_cpu(hrtimer_last_hrtimer, cpu);
	per_cpu(hrtimer_last_hrtimer, cpu) = timestamp;

	if (last_hrtimer == 0) {
		/* Set the initial value */
		per_cpu(percpu_timestamp, cpu) = timestamp;
	} else {
		s64 delta = timestamp - last_hrtimer;

		per_cpu(hrtimer_count, cpu)++;
		if (delta < NSEC_PER_MSEC/2)
			per_cpu(hrtimer_fast, cpu)++;
		else if (delta > 2*NSEC_PER_MSEC)
			per_cpu(hrtimer_slow, cpu)++;
		else
			per_cpu(hrtimer_ok, cpu)++;

		if (per_cpu(hrtimer_count, cpu) % 1000 == 0) {
			const char *result;
			s64 last_timestamp = per_cpu(percpu_timestamp, cpu);
			s64 jitter = timestamp - last_timestamp - NSEC_PER_SEC;

			if (jitter < 0)
				jitter = -jitter;

			if ((per_cpu(hrtimer_ok, cpu) >= 800) &&
			    (jitter < NSEC_PER_SEC/10))
				result = "pass";
			else
				result = "fail";

			pr_err("core: %d hrtimer: %s (jitter %lld, too fast %d, ok %d, too slow %d)\n",
			       cpu, result, jitter, per_cpu(hrtimer_fast, cpu),
			       per_cpu(hrtimer_ok, cpu), per_cpu(hrtimer_slow,
								 cpu));

			per_cpu(hrtimer_fast, cpu) = 0;
			per_cpu(hrtimer_ok, cpu) = 0;
			per_cpu(hrtimer_slow, cpu) = 0;

			per_cpu(percpu_timestamp, cpu) = timestamp;
		}
	}

	return HRTIMER_RESTART;
}
Example #19
static void init_packet(packet_t* packet){
  static unsigned int id = 0;
  static s64 time = 0;
  struct timespec ts;
  s64 creation_timestamp;

  packet->id = id;
  packet->accumulated_time = 0;

  clock_gettime(CLOCK_REALTIME, &ts);
  creation_timestamp = timespec_to_ns(ts);
  time = timespec_to_ns(ts);
  memcpy(& (packet->in_time), & creation_timestamp, sizeof(s64));
  
  printf("%lld\n", packet->in_time);
  id++;
}
Example #20
u32 b2r2_get_curr_nsec(void)
{
	struct timespec ts;

	getrawmonotonic(&ts);

	return (u32)timespec_to_ns(&ts);
}
static inline s64 yas_iio_get_boottime_ns(void)
{
	struct timespec ts;

	ts = ktime_to_timespec(ktime_get_boottime());

	return timespec_to_ns(&ts);
}
Example #22
/**
 * _omap_device_deactivate - decrease device readiness
 * @od: struct omap_device *
 * @ignore_lat: decrease to latency target (0) or full inactivity (1)?
 *
 * Decrease readiness of omap_device @od (thus increasing device
 * wakeup latency, but conserving power).  If @ignore_lat is
 * IGNORE_WAKEUP_LAT, make the omap_device fully inactive.  Otherwise,
 * if @ignore_lat is USE_WAKEUP_LAT, and the device's maximum wakeup
 * latency is less than the requested maximum wakeup latency, step
 * forwards in the omap_device_pm_latency table to ensure the device's
 * maximum wakeup latency is less than or equal to the requested
 * maximum wakeup latency.  Returns 0.
 */
static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat)
{
	struct timespec a, b, c;

	pr_debug("omap_device: %s: deactivating\n", od->pdev.name);

	while (od->pm_lat_level < od->pm_lats_cnt) {
		struct omap_device_pm_latency *odpl;
		unsigned long long deact_lat = 0;

		odpl = od->pm_lats + od->pm_lat_level;

		if (!ignore_lat &&
		    ((od->dev_wakeup_lat + odpl->activate_lat) >
		     od->_dev_wakeup_lat_limit))
			break;

		read_persistent_clock(&a);

		/* XXX check return code */
		odpl->deactivate_func(od);

		read_persistent_clock(&b);

		c = timespec_sub(b, a);
		deact_lat = timespec_to_ns(&c);

		pr_debug("omap_device: %s: pm_lat %d: deactivate: elapsed time "
			 "%llu nsec\n", od->pdev.name, od->pm_lat_level,
			 deact_lat);

		if (deact_lat > odpl->deactivate_lat) {
			odpl->deactivate_lat_worst = deact_lat;
			if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
				odpl->deactivate_lat = deact_lat;
				pr_warning("omap_device: %s.%d: new worst case "
					   "deactivate latency %d: %llu\n",
					   od->pdev.name, od->pdev.id,
					   od->pm_lat_level, deact_lat);
			} else
				pr_warning("omap_device: %s.%d: deactivate "
					   "latency %d higher than exptected. "
					   "(%llu > %d)\n",
					   od->pdev.name, od->pdev.id,
					   od->pm_lat_level, deact_lat,
					   odpl->deactivate_lat);
		}


		od->dev_wakeup_lat += odpl->activate_lat;

		od->pm_lat_level++;
	}

	return 0;
}
Example #23
static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
{
	struct timespec ts;
	s64 cpu_ns;

	cputime_to_timespec(ct, &ts);
	cpu_ns = timespec_to_ns(&ts);

	return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
}
Example #24
static void read_gpio_accurate (void) {

    int x = 0;
    struct timespec start_time, end_time, current_time;


    dbg("");	


    // IRQs will mess up our sample times so turn them off.
    local_irq_disable();
    local_fiq_disable();

    // time this bad boy
    getnstimeofday(&start_time);

    // get the data for the whole first 32 gpio pins & figure out what we want later
    for(x = 0; x < piScopinatorSampleSize; x++) {
        collected_data[x] = GPIO_READ_ALL;
        getnstimeofday(&current_time);
        collection_times[x] = timespec_to_ns(&current_time);
    }

    // end time
    getnstimeofday(&end_time);

    // even though the functions say nano seconds it won't really be nano second resolution since our pi 
    // isn't that fast.  Oh well

    collected_dataTime = timespec_to_ns(&end_time) - timespec_to_ns(&start_time);

    // don't forget to reactivate IRQ
    local_fiq_enable();
    local_irq_enable();    

    // We are going to be outputting in pages so setting up a pointer and a 
    // counter so we know when we are done.
    //dataPointer = (int*)&collected_data;
    data_pointer_count = 0;

    data_ready = 1;
}
Example #25
File: rmt-ps-cas.c  Project: IRATI/stack
static int update_cycles(struct reg_cycle_t * prev_cycle,
		  	 struct reg_cycle_t * cur_cycle,
			 ssize_t cur_qlen,
			 bool enqueue)
{
	ssize_t		end_len;
	struct timespec	t_sub;
	s64		t_sub_ns;

	end_len = 0;
	if (!enqueue) {
		if (!cur_qlen)
			return -1;
		end_len = 1;
	} else if (cur_qlen == end_len) {
		/* end cycle */
		getnstimeofday(&cur_cycle->t_end);
		t_sub = timespec_sub(cur_cycle->t_end, cur_cycle->t_last_start);
		cur_cycle->sum_area += (ulong) cur_qlen * timespec_to_ns(&t_sub);
		cur_cycle->t_last_start = cur_cycle->t_end;
	} else {
		/* middle cycle */
		cur_cycle->t_last_start = cur_cycle->t_end;
		getnstimeofday(&cur_cycle->t_end);
		t_sub = timespec_sub(cur_cycle->t_end, cur_cycle->t_last_start);
		cur_cycle->sum_area += (ulong) cur_qlen * timespec_to_ns(&t_sub);
	}

	t_sub = timespec_sub(cur_cycle->t_end, prev_cycle->t_start);
	t_sub_ns = timespec_to_ns(&t_sub);
	cur_cycle->avg_len = (cur_cycle->sum_area + prev_cycle->sum_area);

	if (t_sub_ns <= 0) {
		LOG_ERR("Time delta is <= 0!");
		return -1;
	}

	cur_cycle->avg_len /= (ulong) abs(t_sub_ns);
	return 0;
}
Example #26
/**
 * i40e_ptp_write - Write the PHC time to the device
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * This function writes the PRTTSYN_TIME registers with the user value. Since
 * we receive a timespec from the stack, we must convert that timespec into
 * nanoseconds before programming the registers.
 **/
static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
{
	struct i40e_hw *hw = &pf->hw;
	u64 ns = timespec_to_ns(ts);

	/* The timer will not update until the high register is written, so
	 * write the low register first.
	 */
	wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
	wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
}
Example #27
static inline uint64_t
perf_get_timestamp(void)
{
	struct timespec ts;
	int ret;

	ret = clock_gettime(perf_clk_id, &ts);
	if (ret)
		return 0;

	return timespec_to_ns(&ts);
}
Example #28
/**
 * _omap_device_activate - increase device readiness
 * @od: struct omap_device *
 * @ignore_lat: increase to latency target (0) or full readiness (1)?
 *
 * Increase readiness of omap_device @od (thus decreasing device
 * wakeup latency, but consuming more power).  If @ignore_lat is
 * IGNORE_WAKEUP_LAT, make the omap_device fully active.  Otherwise,
 * if @ignore_lat is USE_WAKEUP_LAT, and the device's maximum wakeup
 * latency is greater than the requested maximum wakeup latency, step
 * backwards in the omap_device_pm_latency table to ensure the
 * device's maximum wakeup latency is less than or equal to the
 * requested maximum wakeup latency.  Returns 0.
 */
static int _omap_device_activate(struct omap_device *od, u8 ignore_lat)
{
	struct timespec a, b, c;

	dev_dbg(&od->pdev->dev, "omap_device: activating\n");

	while (od->pm_lat_level > 0) {
		struct omap_device_pm_latency *odpl;
		unsigned long long act_lat = 0;

		od->pm_lat_level--;

		odpl = od->pm_lats + od->pm_lat_level;

		if (!ignore_lat &&
		    (od->dev_wakeup_lat <= od->_dev_wakeup_lat_limit))
			break;

		read_persistent_clock(&a);

		/* XXX check return code */
		odpl->activate_func(od);

		read_persistent_clock(&b);

		c = timespec_sub(b, a);
		act_lat = timespec_to_ns(&c);

		dev_dbg(&od->pdev->dev,
			"omap_device: pm_lat %d: activate: elapsed time "
			"%llu nsec\n", od->pm_lat_level, act_lat);

		if (act_lat > odpl->activate_lat) {
			odpl->activate_lat_worst = act_lat;
			if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
				odpl->activate_lat = act_lat;
				dev_dbg(&od->pdev->dev,
					"new worst case activate latency "
					"%d: %llu\n",
					od->pm_lat_level, act_lat);
			} else
				dev_warn(&od->pdev->dev,
					 "activate latency %d "
					 "higher than exptected. (%llu > %d)\n",
					 od->pm_lat_level, act_lat,
					 odpl->activate_lat);
		}

		od->dev_wakeup_lat -= odpl->activate_lat;
	}

	return 0;
}
Example #29
static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
{
	int min = 0, max = 0, fail_count = 0;
	uint64_t sum = 0;
	uint64_t avg;
	int i;
	/* Allow udelay to be up to 0.5% fast: 0.5% of (usecs * 1000) ns is usecs * 5 ns */
	int allowed_error_ns = usecs * 5;

	for (i = 0; i < iters; ++i) {
		struct timespec ts1, ts2;
		int time_passed;

		ktime_get_ts(&ts1);
		udelay(usecs);
		ktime_get_ts(&ts2);
		time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);

		if (i == 0 || time_passed < min)
			min = time_passed;
		if (i == 0 || time_passed > max)
			max = time_passed;
		if ((time_passed + allowed_error_ns) / 1000 < usecs)
			++fail_count;
		WARN_ON(time_passed < 0);
		sum += time_passed;
	}

	avg = sum;
	do_div(avg, iters);
	seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d",
			usecs, iters, usecs * 1000,
			(usecs * 1000) - allowed_error_ns, min, avg, max);
	if (fail_count)
		seq_printf(s, " FAIL=%d", fail_count);
	seq_puts(s, "\n");

	return 0;
}
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	current_cx_state = *cx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	if (!enable_off_mode) {
		if (mpu_state < PWRDM_POWER_RET)
			mpu_state = PWRDM_POWER_RET;
		if (core_state < PWRDM_POWER_RET)
			core_state = PWRDM_POWER_RET;
	}

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return (u32)timespec_to_ns(&ts_idle)/1000;
}