Example #1
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
	uint64_t now, duration, last;

	spin_lock(&stats->lock);

	now		= local_clock();
	duration	= time_after64(now, start_time)
		? now - start_time : 0;
	last		= time_after64(now, stats->last)
		? now - stats->last : 0;

	stats->max_duration = max(stats->max_duration, duration);

	if (stats->last) {
		ewma_add(stats->average_duration, duration, 8, 8);

		if (stats->average_frequency)
			ewma_add(stats->average_frequency, last, 8, 8);
		else
			stats->average_frequency  = last << 8;
	} else {
		stats->average_duration  = duration << 8;
	}

	stats->last = now ?: 1;

	spin_unlock(&stats->lock);
}
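The ewma_add() above is bcache's fixed-point exponentially weighted moving average: the running average is kept scaled by 2^factor, which is why the first sample is seeded as "duration << 8". A minimal userspace sketch of the same idea (ewma_sketch() is illustrative, not the kernel macro):

#include <stdio.h>
#include <stdint.h>

/* Fixed-point EWMA: *ewma holds the average scaled by 2^factor */
static uint64_t ewma_sketch(uint64_t *ewma, uint64_t val, int weight, int factor)
{
	*ewma *= weight - 1;
	*ewma += val << factor;
	*ewma /= weight;
	return *ewma >> factor;	/* unscaled average for the caller */
}

int main(void)
{
	uint64_t avg = 100 << 8;	/* seeded like stats->average_duration */
	uint64_t samples[] = { 110, 90, 200, 100 };

	for (int i = 0; i < 4; i++)
		printf("after %3llu: avg = %llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)ewma_sketch(&avg, samples[i], 8, 8));
	return 0;
}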
Example #2
static int sec_tsp_log_timestamp(unsigned int idx)
{
	/* Add the current time stamp */
	char tbuf[50];
	unsigned tlen;
	unsigned long long t;
	unsigned long nanosec_rem;

	t = local_clock();
	nanosec_rem = do_div(t, 1000000000);
	tlen = sprintf(tbuf, "[%5lu.%06lu] ",
			(unsigned long) t,
			nanosec_rem / 1000);

	/* Overflow buffer size */
	if (idx + tlen > sec_tsp_log_size - 1) {
		tlen = scnprintf(&sec_tsp_log_buf[0],
						tlen + 1, "%s", tbuf);
		*sec_tsp_log_ptr = tlen;
	} else {
		tlen = scnprintf(&sec_tsp_log_buf[idx], tlen + 1, "%s", tbuf);
		*sec_tsp_log_ptr += tlen;
	}

	return *sec_tsp_log_ptr;
}
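In the kernel, do_div(t, base) divides t in place and returns the remainder, leaving whole seconds in t and the sub-second nanoseconds separately. A userspace sketch of that split and the resulting "[seconds.microseconds] " prefix (do_div_sketch() is a stand-in, not the kernel macro):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's do_div(): divide in place, return remainder */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = *n % base;
	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t t = 5123456789ULL;	/* pretend local_clock() value in ns */
	uint32_t nanosec_rem = do_div_sketch(&t, 1000000000);
	char tbuf[50];

	int tlen = snprintf(tbuf, sizeof(tbuf), "[%5lu.%06lu] ",
			    (unsigned long)t,
			    (unsigned long)(nanosec_rem / 1000));
	printf("\"%s\" (%d chars)\n", tbuf, tlen);	/* "[    5.123456] " */
	return 0;
}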
Example #3
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
    struct task_cputime cputime = {
        .sum_exec_runtime = p->se.sum_exec_runtime,
    };

    task_cputime(p, &cputime.utime, &cputime.stime);
    cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
    struct task_cputime cputime;

    thread_group_cputime(p, &cputime);
    cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
    unsigned long long clock;

    clock = local_clock();
    if (clock < tsk->vtime_snap)
        return 0;

    return clock - tsk->vtime_snap;
}
Example #4
void kmsgdump_write(char *fmt, ...)
{
	char buf[256] = {0,};
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* vsnprintf() reports the full length; mark truncation with a newline */
	if (len >= (int)sizeof(buf) || len < 0)
		buf[sizeof(buf) - 2] = '\n';

#if !defined(NM73131_0_BOARD)
#define LOG_TIME_STAMP	/* the NM73131 board does not support timestamps */
#endif

#ifdef LOG_TIME_STAMP
	{
		char buf_t[30] = {0,};
		unsigned long rem_nsec;
		u64 ts = local_clock();
		
		rem_nsec = do_div(ts, 1000000000);
		sprintf(buf_t, "[%5lu.%06lu] ", (unsigned long)ts, rem_nsec / 1000);

		queue_write(&g_dumpqueue, (void*)buf_t, strlen(buf_t));
	}
#endif

	queue_write(&g_dumpqueue, (void*)buf, strlen(buf));
	wake_up(&atwilc_msgdump_waitqueue);

}
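With the vsnprintf() fix above, truncation is detectable from the return value, which reports the length the fully formatted string would have had rather than what fit. A small userspace check of that contract:

#include <stdio.h>

int main(void)
{
	char buf[8];
	/* Returns the untruncated length (11), not the 7 chars that fit */
	int len = snprintf(buf, sizeof(buf), "hello world");

	if (len >= (int)sizeof(buf))
		printf("truncated: \"%s\" (wanted %d bytes)\n", buf, len + 1);
	return 0;
}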
Example #5
static void ringbuf_store(enum ringbuf_flags flags, const char *text, u16 text_len,
		      u8 cpu_id, struct task_struct *owner)
{
	struct ringbuf *msg;
	u32 size, pad_len;

	
#ifdef DEBUG_STLOG
	printk("[STLOG] %s stlog_seq %llu stlog_idx %lu ringbuf_first_seq %llu ringbuf_first_idx %lu ringbuf_next_seq %llu ringbuf_next_idx %lu\n",
	       __func__, stlog_seq, stlog_idx, ringbuf_first_seq,
	       ringbuf_first_idx, ringbuf_next_seq, ringbuf_next_idx);
#endif

	/* number of '\0' padding bytes to next message */
	size = sizeof(struct ringbuf) + text_len;
	pad_len = (-size) & (RINGBUF_ALIGN - 1);
	size += pad_len;

	while (ringbuf_first_seq < ringbuf_next_seq) {
		u32 free;

		if (ringbuf_next_idx > ringbuf_first_idx)
			free = max(ringbuf_buf_len - ringbuf_next_idx, ringbuf_first_idx);
		else
			free = ringbuf_first_idx - ringbuf_next_idx;

		if (free > size + sizeof(struct ringbuf))
			break;

		/* drop old messages until we have enough space */
		ringbuf_first_idx = ringbuf_next(ringbuf_first_idx);
		ringbuf_first_seq++;
	}

	if (ringbuf_next_idx + size + sizeof(struct ringbuf) >= ringbuf_buf_len) {
		memset(ringbuf_buf + ringbuf_next_idx, 0, sizeof(struct ringbuf));
		ringbuf_next_idx = 0;
	}

	/* fill message */
	msg = (struct ringbuf *)(ringbuf_buf + ringbuf_next_idx);
	memcpy(ringbuf_text(msg), text, text_len);
	msg->text_len = text_len;
	msg->flags = flags & 0x1f;
#ifdef CONFIG_STLOG_CPU_ID
	msg->cpu_id = cpu_id;
#endif
#ifdef CONFIG_STLOG_PID
	msg->pid = owner->pid;
	memcpy(msg->comm, owner->comm, TASK_COMM_LEN);
#endif
	msg->ts_nsec = local_clock();
	msg->len = sizeof(struct ringbuf) + text_len + pad_len;

	/* insert message */
	ringbuf_next_idx += msg->len;
	ringbuf_next_seq++;
	wake_up_interruptible(&ringbuf_wait);

}
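The pad_len computation above uses the standard trick for rounding up to a power-of-two boundary: for alignment 2^k, (-size) & (2^k - 1) is exactly the distance from size to the next multiple. A quick demonstration with a hypothetical alignment of 4 (RINGBUF_ALIGN itself is defined elsewhere in the driver):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_TO 4	/* must be a power of two, like RINGBUF_ALIGN */

int main(void)
{
	for (uint32_t size = 1; size <= 8; size++) {
		uint32_t pad = (-size) & (ALIGN_TO - 1);
		printf("size %u -> pad %u -> padded %u\n", size, pad, size + pad);
	}
	return 0;
}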
Example #6
static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy; e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}
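To see the inverse-scaled terms in action, plug in numbers: 4000 dirty sectors over target with the default p_term_inverse of 40 yields a proportional term of 100 sectors/s, i.e. a rate that would retire the excess in 40 seconds. A worked sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical controller state mirroring the fields above */
	int64_t target = 100000, dirty = 104000;
	int64_t p_term_inverse = 40, i_term_inverse = 10000;
	int64_t integral = 0, update_seconds = 5;

	int64_t error = dirty - target;			/* 4000 */
	int64_t proportional = error / p_term_inverse;	/* 100 */

	integral += error * update_seconds;	/* one controller period */
	int64_t integral_scaled = integral / i_term_inverse;	/* 2 */

	printf("rate = %lld + %lld sectors/s\n",
	       (long long)proportional, (long long)integral_scaled);
	return 0;
}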
Example #7
static int sched_debug_show(struct seq_file *m, void *v)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;
	int cpu;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

	read_lock_irqsave(&tasklist_lock, flags);
	for_each_possible_cpu(cpu)
		print_cpu(m, cpu);
	read_unlock_irqrestore(&tasklist_lock, flags);
	SEQ_printf(m, "\n");

	return 0;
}
Example #8
/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from local_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
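The generator is a plain linear congruential generator reseeded occasionally from the clock, trading quality for speed. A self-contained sketch with hypothetical constants (the real RCU_RANDOM_* values are defined in the torture-test source):

#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for RCU_RANDOM_MULT/ADD/REFRESH */
#define RND_MULT	69069UL
#define RND_ADD		1234567UL
#define RND_REFRESH	10000

struct rnd_state { unsigned long state; long count; };

static unsigned long crude_random(struct rnd_state *r)
{
	if (--r->count < 0) {
		r->state += (unsigned long)clock();	/* occasional reseed */
		r->count = RND_REFRESH;
	}
	r->state = r->state * RND_MULT + RND_ADD;
	return r->state;
}

int main(void)
{
	struct rnd_state r = { 0, 0 };

	for (int i = 0; i < 3; i++)
		printf("%lu\n", crude_random(&r));
	return 0;
}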
Example #9
static ssize_t store_boot_stat(struct device *dev,
				struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long long t;
	
	if (!strncmp(buf, "!@Boot: start init process", 26)) {
		t = local_clock();
		do_div(t, 1000000);	/* ns -> ms */
		boot_events[SYSTEM_START_INIT_PROCESS].time = (unsigned int)t;
	}

	return count;
}
Example #10
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
		struct adreno_submit_time *time)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	BUG_ON(rb->wptr == 0);

	/*
	 * Let the pwrscale policy know that new commands have
	 * been submitted.
	 */
	kgsl_pwrscale_busy(rb->device);

	/* Write the changes to CFF if so enabled */
	_cff_write_ringbuffer(rb);

	/*
	 * Read the current GPU ticks and wallclock for most accurate
	 * profiling
	 */

	if (time != NULL) {
		/*
		 * Here we are attempting to create a mapping between the
		 * GPU time domain (alwayson counter) and the CPU time domain
		 * (local_clock) by sampling both values as close together as
		 * possible. This is useful for many types of debugging and
		 * profiling. In order to make this mapping as accurate as
		 * possible, we must turn off interrupts to avoid running
		 * interrupt handlers between the two samples.
		 */
		unsigned long flags;
		local_irq_save(flags);

		if (gpudev->alwayson_counter_read != NULL)
			time->ticks = gpudev->alwayson_counter_read(adreno_dev);
		else
			time->ticks = 0;

		/* Get the kernel clock for time since boot */
		time->ktime = local_clock();

		/* Get the timeofday for the wall time (for the user) */
		getnstimeofday(&time->utime);

		local_irq_restore(flags);
	}

	/* Memory barrier before informing the hardware of new commands */
	mb();

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
}
Example #11
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}
Example #13
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
		struct adreno_submit_time *time)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
	BUG_ON(rb->wptr == 0);

	
	/* Write the changes to CFF if so enabled */
	_cff_write_ringbuffer(rb);

	if (time != NULL) {
		unsigned long flags;
		local_irq_save(flags);

		
		/* Read the GPU alwayson counter where the hardware has one */
		if (!adreno_is_a3xx(adreno_dev)) {
			adreno_readreg64(adreno_dev,
				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
				&time->ticks);

			if (ADRENO_GPUREV(adreno_dev) >= 400 &&
				ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
				time->ticks &= 0xFFFFFFFF;
		} else {
			time->ticks = 0;
		}

		
		/* Get the kernel clock for time since boot */
		time->ktime = local_clock();

		
		/* Get the timeofday for the wall time (for the user) */
		getnstimeofday(&time->utime);

		local_irq_restore(flags);
	}

	
	/* Memory barrier before informing the hardware of new commands */
	mb();

	if (adreno_preempt_state(adreno_dev, ADRENO_DISPATCHER_PREEMPT_CLEAR) &&
		(adreno_dev->cur_rb == rb)) {
		kgsl_pwrscale_busy(rb->device);
		adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
	}
}
Example #14
void sec_boot_stat_add(const char *c)
{
	int i;
	unsigned long long t;

	for (i = 0; boot_events[i].string != NULL; i++) {
		if (!strcmp(c, boot_events[i].string)) {
			t = local_clock();
			do_div(t, 1000000);	/* ns -> ms */
			boot_events[i].time = (unsigned int)t;
			break;
		}
	}
}
Example #15
/**
 * bch_next_delay() - update ratelimiting statistics and calculate next delay
 * @d: the struct bch_ratelimit to update
 * @done: the amount of work done, in arbitrary units
 *
 * Increment @d by the amount of work done, and return how long to delay in
 * jiffies until the next time to do some work.
 */
uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
	uint64_t now = local_clock();

	d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));

	/* Bound the time.  Don't let us fall further than 2 seconds behind
	 * (this prevents unnecessary backlog that would make it impossible
	 * to catch up).  If we're ahead of the desired writeback rate,
	 * don't let us sleep more than 2.5 seconds (so we can notice/respond
	 * if the control system tells us to speed up!).
	 */
	if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
		d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;

	return time_after64(d->next, now)
		? div_u64(d->next - now, NSEC_PER_SEC / HZ)
		: 0;
}
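The final division converts a nanosecond gap into jiffies: one jiffy is NSEC_PER_SEC / HZ nanoseconds, so being 0.6 s ahead of schedule at HZ=250 means sleeping 150 jiffies. A small arithmetic check (HZ is an assumed config value here):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define HZ		250	/* assumed kernel config value */

int main(void)
{
	uint64_t gap_ns = 600000000ULL;	/* 0.6 s ahead of schedule */
	uint64_t sleep_jiffies = gap_ns / (NSEC_PER_SEC / HZ);

	printf("sleep %llu jiffies of %llu ns each\n",
	       (unsigned long long)sleep_jiffies,
	       (unsigned long long)(NSEC_PER_SEC / HZ));
	return 0;
}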
Example #16
static int spi_slave_time_submit(struct spi_slave_time_priv *priv)
{
	u32 rem_us;
	int ret;
	u64 ts;

	ts = local_clock();
	rem_us = do_div(ts, 1000000000) / 1000;

	priv->buf[0] = cpu_to_be32(ts);
	priv->buf[1] = cpu_to_be32(rem_us);

	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);

	priv->msg.complete = spi_slave_time_complete;
	priv->msg.context = priv;

	ret = spi_async(priv->spi, &priv->msg);
	if (ret)
		dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);

	return ret;
}
Example #17
File: net.c Project: 020gzh/linux
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}
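Shifting the nanosecond clock right by 10 divides by 1024, a cheap approximation of microseconds that reads about 2.3% low. A one-line check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ns = 1000000000ULL;	/* one second in ns */

	/* >> 10 gives 976562 "microseconds" against the true 1000000 */
	printf("%llu vs %llu\n",
	       (unsigned long long)(ns >> 10),
	       (unsigned long long)(ns / 1000));
	return 0;
}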
Example #18
static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
{
	bool status_down;
	uint64_t timeout;
	uint64_t now;
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
	struct device *dev = mdm->dev;
	int ret;
	int max_spin = 20;

	switch (notify) {
	case ESOC_IMG_XFER_DONE:
		dev_info(dev, "%s ESOC_IMG_XFER_DONE\n", __func__);
		if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) ==  0)
			schedule_delayed_work(&mdm->mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case ESOC_BOOT_DONE:
		esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
		break;
	case ESOC_IMG_XFER_RETRY:
		mdm->init = 1;
		mdm_toggle_soft_reset(mdm);
		break;
	case ESOC_IMG_XFER_FAIL:
		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
		break;
	case ESOC_BOOT_FAIL:
		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
		break;
	case ESOC_UPGRADE_AVAILABLE:
		break;
	case ESOC_DEBUG_DONE:
		mdm->debug_fail = false;
		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
		complete(&mdm->debug_done);
		break;
	case ESOC_DEBUG_FAIL:
		mdm->debug_fail = true;
		complete(&mdm->debug_done);
		break;
	case ESOC_PRIMARY_CRASH:
		mdm_disable_irqs(mdm);
		status_down = false;
		dev_info(dev, "signal apq err fatal for graceful restart\n");
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_VDDMIN), 1);
		timeout = local_clock();
		do_div(timeout, NSEC_PER_MSEC);
		timeout += MDM_MODEM_TIMEOUT;
		do {
			if (gpio_get_value(MDM_GPIO(mdm,
						MDM2AP_STATUS)) == 0) {
				status_down = true;
				break;
			}
			now = local_clock();
			do_div(now, NSEC_PER_MSEC);
		} while (!time_after64(now, timeout));

		if (!status_down) {
			dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
								__func__);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !!mdm->soft_reset_inverted);
			/*
			 * allow PS hold assert to be detected.
			 * pmic requires 6ms for crash reset case.
			 */
			mdelay(6);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !mdm->soft_reset_inverted);
		}
		break;
	case ESOC_PRIMARY_REBOOT:
		exynos_pcie_disable_irq(0);
		mdm_disable_irqs(mdm);
		dev_info(mdm->dev, "Triggering mdm cold reset");
		mdm->ready = 0;
		while (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) && max_spin--) {
			msleep(100);
			dev_info(mdm->dev, "gpio_get_value(MDM2AP_STATUS) : %d\n",
					gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)));
		}
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				!!mdm->soft_reset_inverted);
		mdelay(300);
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				!mdm->soft_reset_inverted);
		break;
	case ESOC_DIAG_DISABLE:
		dev_info(mdm->dev, "Send diag_disable noti\n");
		ret = sysmon_send_diag_disable_noti(mdm->sysmon_subsys_id);
		if (ret < 0)
			dev_err(mdm->dev, "sending diag_disable noti is failed, ret = %d\n", ret);
		else
			dev_info(mdm->dev, "sending diag_disable noti is succeed.\n");
		break;
	case ESOC_FORCE_CPCRASH:
		dev_err(mdm->dev, "Force CP Crash\n");
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_VDDMIN), 1);
		break;
	case ESOC_CP_SILENT_RESET:
		dev_err(mdm->dev, "Force CP Silent Reset\n");
		set_silent_reset();
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_VDDMIN), 1);
		break;
	}
}
Example #19
File: clock.c Project: 020gzh/linux
/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}
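The __weak attribute makes this the default definition: if an architecture supplies a strong running_clock() in another object file, the linker picks that one instead. A minimal userspace sketch of the mechanism (GCC/Clang attribute syntax, hypothetical symbol name):

#include <stdio.h>

/* Default, overridable definition; a strong definition in any other
 * object file linked into the program would replace it. */
__attribute__((weak)) unsigned long long running_clock_sketch(void)
{
	return 0;	/* stand-in for local_clock() */
}

int main(void)
{
	printf("%llu\n", running_clock_sketch());	/* 0 unless overridden */
	return 0;
}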
Example #20
static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#ifdef CONFIG_SCHED_HMP
	P(sched_small_task);
	P(sched_upmigrate);
	P(sched_downmigrate);
	P(sched_init_task_load_windows);
	P(sched_init_task_load_pelt);
	P(min_capacity);
	P(max_capacity);
	P(sched_use_pelt);
	P(sched_ravg_window);
#endif
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}
Example #21
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
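As the comment notes, a 30-bit shift divides nanoseconds by 2^30, roughly 1.074e9, so the result runs about 7% slow against true seconds, close enough for watchdog timestamps. A quick check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ns = 100000000000ULL;	/* 100 s in ns */

	/* 100 true seconds come out as 93: 2^30 ns ~= 1.074 s per unit */
	printf("%llu\n", (unsigned long long)(ns >> 30));
	return 0;
}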
Example #22
static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
{
	bool status_down;
	uint64_t timeout;
	uint64_t now;
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
	struct device *dev = mdm->dev;

	switch (notify) {
	case ESOC_IMG_XFER_DONE:
		dev_info(dev, "%s ESOC_IMG_XFER_DONE\n", __func__);
		if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) ==  0)
			schedule_delayed_work(&mdm->mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case ESOC_BOOT_DONE:
		esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
		break;
	case ESOC_IMG_XFER_RETRY:
		mdm->init = 1;
		mdm_toggle_soft_reset(mdm);
		break;
	case ESOC_IMG_XFER_FAIL:
		esoc_clink_evt_notify(ESOC_BOOT_FAIL, esoc);
		break;
	case ESOC_UPGRADE_AVAILABLE:
		break;
	case ESOC_DEBUG_DONE:
		mdm->debug_fail = false;
		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
		complete(&mdm->debug_done);
		break;
	case ESOC_DEBUG_FAIL:
		mdm->debug_fail = true;
		complete(&mdm->debug_done);
		break;
	case ESOC_PRIMARY_CRASH:
		mdm_disable_irqs(mdm);
		status_down = false;
		dev_info(dev, "signal apq err fatal for graceful restart\n");
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		timeout = local_clock();
		do_div(timeout, NSEC_PER_MSEC);
		timeout += MDM_MODEM_TIMEOUT;
		do {
			if (gpio_get_value(MDM_GPIO(mdm,
						MDM2AP_STATUS)) == 0) {
				status_down = true;
				break;
			}
			now = local_clock();
			do_div(now, NSEC_PER_MSEC);
		} while (!time_after64(now, timeout));

		if (!status_down) {
			dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
								__func__);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !!mdm->soft_reset_inverted);
			/*
			 * allow PS hold assert to be detected.
			 * pmic requires 6ms for crash reset case.
			 */
			mdelay(6);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !mdm->soft_reset_inverted);
		}
		break;
	case ESOC_PRIMARY_REBOOT:
		dev_info(mdm->dev, "Triggering mdm cold reset");
		mdm->ready = 0;
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				!!mdm->soft_reset_inverted);
		mdelay(300);
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				!mdm->soft_reset_inverted);
		break;
	}
}
Example #23
/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}