/*
 * cpufreq_stats_update - credit the jiffies elapsed since the last update
 * to the frequency state the CPU has been running in, both in the per-CPU
 * time_in_state table and in the global per-core cpuN_time_in_state
 * arrays.  Returns 0, including when there is nothing to account yet.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	/*
	 * Bail out when the table is not allocated yet or the frequency
	 * index is still invalid (-1 before the first transition is
	 * recorded) — indexing the arrays with -1 would access memory
	 * out of bounds.
	 */
	if (!stat || stat->last_index == -1) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}

	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));

	if (cpu == 0)
		cpu0_time_in_state[stat->last_index] =
			cputime64_add(cpu0_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
	else if (cpu == 1)
		cpu1_time_in_state[stat->last_index] =
			cputime64_add(cpu1_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
#ifdef CONFIG_QUAD_CORES_SOC_STAT
	else if (cpu == 2)
		cpu2_time_in_state[stat->last_index] =
			cputime64_add(cpu2_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
	else if (cpu == 3)
		cpu3_time_in_state[stat->last_index] =
			cputime64_add(cpu3_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
#endif
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Exemple #2
0
/*
 * get_cpu_idle_time_jiffy - derive a CPU's idle time from kcpustat.
 *
 * Idle time is computed indirectly as wall time minus the summed "busy"
 * cpustat buckets (user, system, irq, softirq, steal, nice).  When @wall
 * is non-NULL it receives the current wall time.  Both the return value
 * and *wall are converted to microseconds via jiffies_to_usecs().
 */
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	/* Sum every busy category; whatever remains of wall time is idle. */
	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		/* NOTE(review): jiffies_to_usecs() takes an unsigned long,
		 * so the 64-bit wall time may be truncated on 32-bit —
		 * confirm against the callers' expectations. */
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}
Exemple #3
0
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (!stat || stat->last_index == -1) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}

	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;

#ifdef CONFIG_ARCH_APQ8064
   if (cpu == 1)
	cpu1_time_in_state[stat->last_index] +=
			cur_time - stat->last_time;

   if (cpu == 2)
	cpu2_time_in_state[stat->last_index] +=
			cur_time - stat->last_time;

   if (cpu == 3)
	cpu3_time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
#elif defined(CONFIG_ARCH_MSM8960)
	if (cpu == 1)
		cpu1_time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
#endif

	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Exemple #4
0
/**
 * hdaps_update - acquire locks and query current state
 *
 * Query current accelerometer state and update global state variables.
 * Also prefetches the next query.
 * Retries until timeout if the accelerometer is not in ready status (common).
 * Does its own locking.
 *
 * Returns 0 on success, a negative errno from the lock attempt or from
 * the last failed query otherwise.
 */
static int hdaps_update(void)
{
	/* Age of the cached readout, in jiffies. */
	u64 age = get_jiffies_64() - last_update_jiffies;
	int total, ret;

	/* Skip hardware access while a fresh readout (younger than 9/10 of
	 * a sample period) is still cached and valid. */
	if (!stale_readout && age < (9*HZ)/(10*sampling_rate))
		return 0; /* already updated recently */
	for (total = 0; total < READ_TIMEOUT_MSECS; total += RETRY_MSECS) {
		ret = thinkpad_ec_lock();
		if (ret)
			return ret;
		ret = __hdaps_update(0);
		thinkpad_ec_unlock();

		if (!ret)
			return 0;
		/* Only -EBUSY (device not ready yet) is worth retrying. */
		if (ret != -EBUSY)
			break;
		msleep(RETRY_MSECS);
	}
	return ret;
}
/*
 * get_cpu_idle_time_jiffy - compute a CPU's idle time as wall time minus
 * the summed busy buckets from kstat (user, system, irq, softirq, steal,
 * nice).  When @wall is non-NULL it receives the wall time.  Both results
 * are converted to microseconds with jiffies_to_usecs().
 */
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	/* Accumulate every busy category via the cputime64 helpers. */
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}
/*
 * hp_stats_update - account hotplug on-time for @cpu and record a state
 * transition when @up differs from the tracked state.
 *
 * Bit 0 of up_down_count encodes whether the CPU is currently considered
 * online; the counter value itself is the number of transitions.
 */
static void hp_stats_update(unsigned int cpu, bool up)
{
    u64 cur_jiffies = get_jiffies_64();
    bool was_up = hp_stats[cpu].up_down_count & 0x1;

    /* Credit the elapsed interval only while the CPU was up. */
    if (was_up)
        hp_stats[cpu].time_up_total = cputime64_add(
                                          hp_stats[cpu].time_up_total, cputime64_sub(
                                              cur_jiffies, hp_stats[cpu].last_update));

    if (was_up != up) {
        hp_stats[cpu].up_down_count++;
        /* After the increment, bit 0 must equal @up; otherwise the stats
         * drifted out of sync and are force-corrected. */
        if ((hp_stats[cpu].up_down_count & 0x1) != up) {
            /* FIXME: sysfs user space CPU control breaks stats */
            pr_err("tegra hotplug stats out of sync with %s CPU%d",
                   (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
                   (cpu < CONFIG_NR_CPUS) ?  cpu : 0);
            hp_stats[cpu].up_down_count ^=  0x1;
        }
    }
    hp_stats[cpu].last_update = cur_jiffies;
}
/*
 * s3cfb_start_progress - draw the initial progress bar on @fb and arm
 * the update timer (fires every HZ/20, i.e. 50 ms).
 */
static void s3cfb_start_progress(struct fb_info *fb)
{	
	int x_pos;
	init_timer(&progress_timer);	

	progress_timer.expires  = (get_jiffies_64() + (HZ/20));	
	progress_timer.data     = (long)fb;	
	progress_timer.function = progress_timer_handler;	
	progress_pos = PROGRESS_BAR_LEFT_POS;	

	// draw progress background.
	for (x_pos = PROGRESS_BAR_LEFT_POS ; x_pos <= PROGRESS_BAR_RIGHT_POS ; x_pos += PROGRESS_BAR_WIDTH){
		s3cfb_update_framebuffer(fb,
			x_pos,
			PROGRESS_BAR_START_Y,
			(void*)anycall_progress_bar,					
			PROGRESS_BAR_WIDTH,
			PROGRESS_BAR_HEIGHT);
	}
	// draw the fixed left end-cap of the bar.
	s3cfb_update_framebuffer(fb,
		PROGRESS_BAR_LEFT_POS,
		PROGRESS_BAR_START_Y,
		(void*)anycall_progress_bar_left,					
		PROGRESS_BAR_WIDTH,
		PROGRESS_BAR_HEIGHT);
	
	progress_pos += PROGRESS_BAR_WIDTH;	
	
	// draw the right end-cap at the current (initial) position; the
	// timer handler advances it from here.
	s3cfb_update_framebuffer(fb,		
		progress_pos,
		PROGRESS_BAR_START_Y,		
		(void*)anycall_progress_bar_right,				
		PROGRESS_BAR_WIDTH,
		PROGRESS_BAR_HEIGHT);
	
	add_timer(&progress_timer);	
	progress_flag = 1;

}
Exemple #8
0
/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 * Returns 0 on success (including the no-op cases of an already-signaled
 * fence or a repeated request for the same fence seqno), -EINVAL for a
 * NULL fence, or -ENODEV when no KFD process matches @mm.
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	/* A second request for the same fence seqno is a duplicate. */
	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	/* Drop the reference taken by kfd_lookup_process_by_mm(). */
	kfd_unref_process(p);
	return 0;
}
/*
 * emc_last_stats_update - credit the jiffies spent at the current EMC
 * clock selection and switch bookkeeping to @last_sel.
 *
 * Selections >= TEGRA_EMC_TABLE_MAX_SIZE are treated as out of range:
 * they receive no time accounting and do not become the new selection.
 */
static void emc_last_stats_update(int last_sel)
{
    unsigned long flags;
    u64 cur_jiffies = get_jiffies_64();

    spin_lock_irqsave(&emc_stats.spinlock, flags);

    /* Credit the elapsed interval to the previously selected entry. */
    if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
        emc_stats.time_at_clock[emc_stats.last_sel] +=
            cur_jiffies - emc_stats.last_update;

    emc_stats.last_update = cur_jiffies;

    /* Record the transition only when the new selection is valid. */
    if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
        emc_stats.clkchange_count++;
        emc_stats.last_sel = last_sel;
    }
    spin_unlock_irqrestore(&emc_stats.spinlock, flags);
}
Exemple #10
0
/*
 * jit_currenttime_proc_show - /proc show handler printing the current
 * time from four sources: jiffies, jiffies_64, do_gettimeofday() and
 * current_kernel_time(), on two lines.
 */
static int jit_currenttime_proc_show(struct seq_file *m, void *v)
/******************************************************************************/
{
	struct timeval tv1;
	struct timespec tv2;
	unsigned long j1;
	u64 j2;

	/* get them four */
	j1 = jiffies;
	j2 = get_jiffies_64();
	do_gettimeofday(&tv1);
	tv2 = current_kernel_time();

	/* print */
	seq_printf(m,"0x%08lx 0x%016Lx %10i.%06i\n"
	       "%40i.%09i\n",
	       j1, j2,
	       (int) tv1.tv_sec, (int) tv1.tv_usec,
	       (int) tv2.tv_sec, (int) tv2.tv_nsec);
	return 0;
}
/*
 * cpufreq_stats_update - credit the jiffies elapsed since the last update
 * to the frequency state the CPU has been running in, both in the per-CPU
 * time_in_state table and in the global cpu0..3_time_in_state arrays.
 * Returns 0, including when there is nothing to account yet.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;
	unsigned long long delta;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	/*
	 * Bail out when the table is missing or the frequency index is
	 * still invalid (-1 before the first recorded transition) —
	 * indexing the arrays with -1 would access memory out of bounds.
	 */
	if (!stat || stat->last_index == -1) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}

	delta = cur_time - stat->last_time;

	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] += delta;

	switch (cpu) {
	case 0:
		cpu0_time_in_state[stat->last_index] += delta;
		break;
	case 1:
		cpu1_time_in_state[stat->last_index] += delta;
		break;
	case 2:
		cpu2_time_in_state[stat->last_index] += delta;
		break;
	case 3:
		cpu3_time_in_state[stat->last_index] += delta;
		break;
	}

	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/*
 * Initialize structures that control access to QSFP. Called once per port
 * on cards that support QSFP.
 */
void qib_qsfp_init(struct qib_qsfp_data *qd,
		   void (*fevent)(struct work_struct *))
{
	u32 mask, highs;
	int pins;

	struct qib_devdata *dd = qd->ppd->dd;

	/* Initialize work struct for later QSFP events */
	INIT_WORK(&qd->work, fevent);

	/*
	 * Later, we may want more validation. For now, just set up pins and
	 * blip reset. If module is present, call qib_refresh_qsfp_cache(),
	 * to do further init.
	 */
	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	highs = mask - QSFP_GPIO_MOD_RST_N;
	if (qd->ppd->hw_pidx) {
		mask <<= QSFP_GPIO_PORT2_SHIFT;
		highs <<= QSFP_GPIO_PORT2_SHIFT;
	}
	dd->f_gpio_mod(dd, highs, mask, mask);
	udelay(20); /* Generous RST dwell */

	dd->f_gpio_mod(dd, mask, mask, mask);
	/* Spec says module can take up to two seconds! */
	mask = QSFP_GPIO_MOD_PRS_N;
	if (qd->ppd->hw_pidx)
		mask <<= QSFP_GPIO_PORT2_SHIFT;

	/* Do not try to wait here. Better to let event handle it */
	pins = dd->f_gpio_mod(dd, 0, 0, 0);
	if (pins & mask)
		goto bail;
	/* We see a module, but it may be unwise to look yet. Just schedule */
	qd->t_insert = get_jiffies_64();
<<<<<<< HEAD
/*
 * hp_stats_show - seq_file handler dumping per-core hotplug statistics:
 * transition counts and total plugged-in time for every G core plus the
 * LP core (hence the inclusive "<= CONFIG_NR_CPUS" loops, where the last
 * slot is the LP core).
 */
static int hp_stats_show(struct seq_file *s, void *data)
{
    int i;
    u64 cur_jiffies = get_jiffies_64();

    /* Bring every counter up to date before printing. */
    mutex_lock(tegra3_cpu_lock);
    if (hp_state != TEGRA_HP_DISABLED) {
        for (i = 0; i <= CONFIG_NR_CPUS; i++) {
            bool was_up = (hp_stats[i].up_down_count & 0x1);
            hp_stats_update(i, was_up);
        }
    }
    mutex_unlock(tegra3_cpu_lock);

    seq_printf(s, "%-15s ", "cpu:");
    for (i = 0; i < CONFIG_NR_CPUS; i++) {
        seq_printf(s, "G%-9d ", i);
    }
    seq_printf(s, "LP\n");

    seq_printf(s, "%-15s ", "transitions:");
    for (i = 0; i <= CONFIG_NR_CPUS; i++) {
        seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
    }
    seq_printf(s, "\n");

    seq_printf(s, "%-15s ", "time plugged:");
    for (i = 0; i <= CONFIG_NR_CPUS; i++) {
        seq_printf(s, "%-10llu ",
                   cputime64_to_clock_t(hp_stats[i].time_up_total));
    }
    seq_printf(s, "\n");

    seq_printf(s, "%-15s %llu\n", "time-stamp:",
               cputime64_to_clock_t(cur_jiffies));

    return 0;
}
Exemple #14
0
/*
 * This file, on the other hand, returns the current time forever
   Format: [jiffies jiffies64 gettimeofday.s gettimeofday.us current_kernel_time.s current_kernel_time.ns]

 * cat /proc/currentime | head -8
 */
ssize_t jit_currentime(struct file *filp,char *buf,size_t count,loff_t *offp)
{
    struct timeval tv1;
    struct timespec tv2;
    unsigned long j1;
    u64 j2;

    /* get them four */
    j1 = jiffies;
    j2 = get_jiffies_64();
    do_gettimeofday(&tv1);
    tv2 = current_kernel_time();

    /* print
     * NOTE(review): buf is written with sprintf() and the caller's count
     * is ignored — presumably this handler is wired through a layer that
     * supplies a large-enough kernel buffer; verify, otherwise a bounded
     * copy_to_user() is needed.  "%Lx" with a u64 argument also assumes
     * u64 == unsigned long long — true on Linux, but worth confirming.
     */
    count = sprintf(buf,"0x%08lx 0x%016Lx %10i.%06i\n"
                    "%40i.%09i\n",
                    j1, j2,
                    (int) tv1.tv_sec, (int) tv1.tv_usec,
                    (int) tv2.tv_sec, (int) tv2.tv_nsec);
    *offp += count;

    return count;
}
/*
 * cpufreq_stats_update - credit the jiffies elapsed since the last update
 * to the frequency state the CPU has been running in.  Returns 0 on
 * success, -1 when no stats table exists for @cpu yet.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	/*
	 * The table may not have been allocated yet, so check
	 * unconditionally.  Previously this check was compiled in only
	 * under CONFIG_MACH_SAMSUNG_P5, leaving every other configuration
	 * to dereference a NULL pointer below.
	 */
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return -1;
	}

	if (stat->time_in_state && stat->last_index >= 0)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/*
 * sensors1p_read - sysfs show: sample the sensor's GPIO pin and format
 * the value ("0" or "1") into @buf.
 *
 * Returns the number of bytes written to @buf, -EINVAL when the sensor
 * is not active, or the negative errno from the GPIO layer.
 */
static int sensors1p_read(struct device *dev, struct sensor *s, char *buf)
{
	int ret;

	if (!s->active)
		return -EINVAL;

	/* Only wait if read() is called before the sensor is up and running
	 * Since jiffies wraps, always sleep maximum time.
	 */
	if (time_before64(get_jiffies_64(), s->when_enabled))
		mdelay(s->startup_time);

	/* For some odd reason, setting direction in the probe function fails */
	ret = gpio_direction_input(s->pin);
	if (ret) {
		dev_err(dev, "Failed to set GPIO pin %d to input.\n", s->pin);
		/*
		 * Propagate the error instead of formatting the negative
		 * errno into @buf as if it were a sample value.
		 */
		return ret;
	}

	return sprintf(buf, "%d", gpio_get_value(s->pin));
}
Exemple #17
0
/*
 * sys_times - fill @tbuf (when non-NULL) with the calling process's CPU
 * times and return the current jiffies_64 value converted to clock ticks.
 */
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually its
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		/* Walk the thread group, adding each live thread's own
		 * counters to the signal-struct totals. */
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		/* Totals for reaped children, accumulated at wait() time. */
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
Exemple #18
0
/*
 * s3cfb_start_progress - paint the boot progress-bar background into
 * window 1 of the secondary framebuffer and arm the update timer
 * (fires every HZ/10, i.e. 100 ms).  Compiled out when only one
 * framebuffer exists.
 */
static void s3cfb_start_progress(void)
{
#if S3C_FB_NUM != 1
	struct s3c_fb_info *fbi = &s3c_fb_info[1];
	unsigned short *bg_src, *bg_dst;
	int	i, j, p;
	unsigned int new_wincon1;

	memset(fbi->map_cpu, 0x00, s3c_fimd.logo_size);

	/* 320 * 25 R5G5B5 BMP */
	bg_dst = (unsigned short *)(fbi->map_cpu + ((320 * 410) * 2));
	bg_src = (unsigned short *)(progress_bg + sizeof(progress_bg) - 2);

	/* Copy the bitmap bottom-up: pure black pixels get the alpha bit
	 * cleared (transparent), everything else gets it set. */
	for (i = 0; i < 25; i++) {
		for (j = 0; j < 320; j++) {
			p = ((320 * i) + j);
			if ((*(bg_src - p) & 0x7FFF) == 0x0000)
				*(bg_dst + p) = (*(bg_src - p) & ~0x8000);
			else
				*(bg_dst + p) = (*(bg_src - p) | 0x8000);
		}
	}

	/* Was terminated with a stray ',' (comma operator); a semicolon is
	 * the intended statement terminator. */
	new_wincon1 = S3C_WINCONx_ENLOCAL_DMA | S3C_WINCONx_BUFSEL_0 | S3C_WINCONx_BUFAUTOEN_DISABLE | \
	           S3C_WINCONx_BITSWP_DISABLE | S3C_WINCONx_BYTSWP_DISABLE | S3C_WINCONx_HAWSWP_ENABLE | \
	           S3C_WINCONx_BURSTLEN_16WORD | S3C_WINCONx_BLD_PIX_PIXEL | S3C_WINCONx_BPPMODE_F_16BPP_A555 | \
	           S3C_WINCONx_ALPHA_SEL_0 | S3C_WINCONx_ENWIN_F_ENABLE;

	writel(new_wincon1, S3C_WINCON1);

	init_timer(&progress_timer);
	progress_timer.expires = (get_jiffies_64() + (HZ/10));
	progress_timer.function = progress_timer_handler;
	add_timer(&progress_timer);
#endif
}
/*
 * sreadaheadflag_dbgfs_write - debugfs write handler driving the
 * readahead-profiling state machine.
 *
 * PROF_INIT starts profiling and arms the timeout timer; PROF_DONE
 * (only valid from PROF_RUN) stops it and cancels the timer.  Any other
 * value is ignored.  Returns sizeof(int) on success, -EINVAL for a short
 * write and -EFAULT when the user buffer cannot be read.
 */
static ssize_t sreadaheadflag_dbgfs_write(
		struct file *file,
		const char __user *buff,
		size_t count,
		loff_t *ppos)
{
	int state;

	/* A full int is required; a shorter write cannot be decoded. */
	if (count < sizeof(int))
		return -EINVAL;

	/* Report the fault instead of returning 0, which user space would
	 * interpret as "wrote nothing" and may retry forever. */
	if (copy_from_user(&state, buff, sizeof(int)))
		return -EFAULT;

	if (state == PROF_INIT) {
		mutex_lock(&prof_buf.ulock);
		_DBG("PROF_INT");
		prof_buf.state = state;
		mutex_unlock(&prof_buf.ulock);

		_DBG("add timer");
		prof_buf.timer.expires = get_jiffies_64() + (PROF_TIMEOUT * HZ);
		add_timer(&prof_buf.timer);
	} else if (state == PROF_DONE) {
		mutex_lock(&prof_buf.ulock);
		if (prof_buf.state != PROF_RUN) {
			mutex_unlock(&prof_buf.ulock);
			return 0;
		}
		_DBG("PROF_DONE by user daemon(boot_completed)");
		prof_buf.state = state;
		mutex_unlock(&prof_buf.ulock);

		_DBG("del timer");
		del_timer(&prof_buf.timer);
	}

	(*ppos) = 0;
	return sizeof(int);
}
Exemple #20
0
/*
 * SII9234_i2c_probe - I2C probe for the SII9234 MHL transmitter.
 *
 * Allocates the driver state, attaches it to the client, publishes the
 * client in the module-global SII9234_i2c_client, and arms a one-shot
 * timer that polls for an HDMI signal 10 seconds from now.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int SII9234_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct SII9234_state *state;

	SII_DEV_DBG("");

	state = kzalloc(sizeof(struct SII9234_state), GFP_KERNEL);
	if (state == NULL) {
		printk("failed to allocate memory \n");
		return -ENOMEM;
	}

	state->client = client;
	i2c_set_clientdata(client, state);

	/* rest of the initialisation goes here. */

	printk("SII9234 attach success!!!\n");

	SII9234_i2c_client = client;
	MHL_i2c_init = 1;

	/* Poll for an HDMI signal once, 10 seconds from now. */
	init_timer(&MHL_reg_check);
	MHL_reg_check.function = check_HDMI_signal;
	MHL_reg_check.expires = get_jiffies_64() + (HZ*10);
	add_timer(&MHL_reg_check);

	return 0;
}
Exemple #21
0
/*
 * fuse_update_attributes - refresh @inode's attributes when the cached
 * validity period (fi->i_time) has expired, otherwise serve them from
 * the cache.  When @refreshed is non-NULL it is set to whether a round
 * trip to the userspace filesystem was made.  Returns 0 or the error
 * from fuse_do_getattr().
 */
int fuse_update_attributes(struct inode *inode, struct kstat *stat,
			   struct file *file, bool *refreshed)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool expired = fi->i_time < get_jiffies_64();
	int err = 0;

	if (expired) {
		/* Cache is stale: ask the userspace filesystem. */
		err = fuse_do_getattr(inode, stat, file);
	} else if (stat) {
		/* Cache still valid: answer locally, preserving the
		 * original mode bits. */
		generic_fillattr(inode, stat);
		stat->mode = fi->orig_i_mode;
	}

	if (refreshed != NULL)
		*refreshed = expired;

	return err;
}
Exemple #22
0
/*
 * progress_timer_handler - timer callback advancing the boot progress
 * bar by four one-pixel columns per tick, re-arming itself (every HZ/20,
 * i.e. 50 ms) until the bar reaches its right edge.
 */
static void progress_timer_handler(unsigned long data)
{
	struct fb_info *fb = (struct fb_info *)data;
	int step;

	for (step = 0; step < 4; step++) {
		s3cfb_update_framebuffer(fb,
			PROGRESS_BAR_START_X,
			progress_pos++,
			(void*)anycall_progress_bar_center,
			1,
			PROGRESS_BAR_HEIGHT);
	}

	if (progress_pos + PROGRESS_BAR_HEIGHT >= PROGRESS_BAR_RIGHT_POS) {
		/* Bar complete: tear everything down. */
		s3cfb_stop_progress();
		return;
	}

	/* Not done yet — schedule the next tick. */
	progress_timer.expires = (get_jiffies_64() + (HZ/20));
	progress_timer.function = progress_timer_handler;
	add_timer(&progress_timer);
}
Exemple #23
0
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

    if (cpu_is_offline(cpu))
        return 0;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}

	if (stat->time_in_state && stat->last_index >= 0)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Exemple #24
0
/*
 * cpuquiet_register_driver - install @drv as the active cpuquiet driver.
 *
 * Allocates the per-CPU stats array, seeds each entry's timestamp, and
 * registers a sysfs stats object for every possible CPU that has a
 * device.  Returns 0 on success, -EINVAL for a NULL driver, -ENOMEM on
 * allocation failure, or -EBUSY when another driver is already
 * registered.
 *
 * NOTE(review): on the -EBUSY path the freshly allocated @stats array
 * and the created sysfs objects are not torn down — looks like a leak;
 * confirm whether unregister or a later caller cleans them up.
 */
int cpuquiet_register_driver(struct cpuquiet_driver *drv)
{
	int err = -EBUSY;
	unsigned int cpu;
	u64 cur_jiffies;
	struct device *cpu_dev;
	
	if (!drv)
		return -EINVAL;

	stats = kzalloc(nr_cpu_ids * sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		cur_jiffies = get_jiffies_64();
		stats[cpu].last_update = cur_jiffies;
		/* Bit 0 of up_down_count encodes "currently online". */
		if (cpu_online(cpu))
			stats[cpu].up_down_count = 1;
		cpu_dev = get_cpu_device(cpu);
		if (cpu_dev) {
			cpuquiet_add_dev(cpu_dev, cpu);
			cpuquiet_cpu_kobject_init(&stats[cpu].cpu_kobject,
					&ktype_cpu_stats, "stats", cpu);
		}
	}

	/* Only the first registration wins; later callers get -EBUSY. */
	mutex_lock(&cpuquiet_lock);
	if (!cpuquiet_curr_driver) {
		err = 0;
		cpuquiet_curr_driver = drv;
	}
	mutex_unlock(&cpuquiet_lock);

	return err;
}
Exemple #25
0
/*
 * This file, on the other hand, returns the current time forever
 *
 * Legacy procfs read handler: prints jiffies, jiffies_64, gettimeofday
 * and current_kernel_time on two lines and returns the byte count.
 */
int jit_currentime(char *buf, char **start, off_t offset,
                   int len, int *eof, void *data)
{
	struct timeval tv1;
	struct timespec tv2;
	unsigned long j1;
	u64 j2;

	/* get them four */
	j1 = jiffies;
	j2 = get_jiffies_64();
	do_gettimeofday(&tv1);
	tv2 = current_kernel_time();

	/* print */
	len=0;
	len += sprintf(buf,"0x%08lx 0x%016Lx %10i.%06i\n"
		       "%40i.%09i\n",
		       j1, j2,
		       (int) tv1.tv_sec, (int) tv1.tv_usec,
		       (int) tv2.tv_sec, (int) tv2.tv_nsec);
	*start = buf;
	return len;
}
/*
 * sensors1p_power_write - sysfs store: "0" powers the sensor down, "1"
 * powers it up via its regulator and records when it will be ready
 * (now + startup_time) so readers know how long to wait.
 *
 * Returns the consumed buffer length, or -EINVAL for malformed input.
 */
static int sensors1p_power_write(struct device *dev,
				 struct sensor *s, const char *buf)
{
	int val;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* Only touch the regulator on an actual state change. */
	if (val != s->active) {
		if (val) {
			/* NOTE(review): regulator_enable() can fail and its
			 * return value is ignored here — confirm intent. */
			regulator_enable(s->regulator);
			s->when_enabled = get_jiffies_64() +
				msecs_to_jiffies(s->startup_time);
		} else
			regulator_disable(s->regulator);
	}
	s->active = val;

	return strnlen(buf, PAGE_SIZE);

}
Exemple #27
0
/*
 * cpufreq_stats_create_table - allocate and initialize the stats table
 * for @policy's CPU from the frequency @table and publish the sysfs
 * stats attribute group.
 *
 * time_in_state, freq_table and (when CONFIG_CPU_FREQ_STAT_DETAILS is
 * set) trans_table share a single allocation.  Returns 0 on success,
 * -EBUSY if a table already exists for the CPU, -EINVAL when the policy
 * cannot be referenced, or -ENOMEM on allocation failure.
 */
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;
	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if ((stat) == NULL)
		return -ENOMEM;

	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	/* Count the usable (non-INVALID) entries in the frequency table. */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	/* One u64 time counter plus one int frequency per state... */
	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	/* ...plus a count x count transition matrix when enabled. */
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	/* freq_table (and trans_table) live inside the same allocation,
	 * right after the time counters. */
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	/* Fill freq_table, skipping invalid and duplicate frequencies. */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
Exemple #28
0
/*
 * Check whether the dentry is still valid
 *
 * If the entry validity timeout has expired and the dentry is
 * positive, try to redo the lookup.  If the lookup results in a
 * different inode, then let the VFS invalidate the dentry and redo
 * the lookup once more.  If the lookup results in the same inode,
 * then refresh the attributes, timeouts and mark the dentry valid.
 *
 * Returns 1 when the dentry is valid, 0 when it must be invalidated,
 * or a negative errno (-ECHILD under RCU walk, -ENOMEM, request errors).
 */
static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
{
	struct inode *inode;
	struct dentry *parent;
	struct fuse_conn *fc;
	int ret;

	inode = ACCESS_ONCE(entry->d_inode);
	if (inode && is_bad_inode(inode))
		goto invalid;
	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
		/* Validity timeout expired: redo the lookup. */
		int err;
		struct fuse_entry_out outarg;
		struct fuse_req *req;
		struct fuse_forget_link *forget;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			goto invalid;

		/* Cannot block in RCU walk mode; let the VFS retry in
		 * ref-walk mode. */
		ret = -ECHILD;
		if (flags & LOOKUP_RCU)
			goto out;

		fc = get_fuse_conn(inode);
		req = fuse_get_req_nopages(fc);
		ret = PTR_ERR(req);
		if (IS_ERR(req))
			goto out;

		forget = fuse_alloc_forget();
		if (!forget) {
			fuse_put_request(fc, req);
			ret = -ENOMEM;
			goto out;
		}

		attr_version = fuse_get_attr_version(fc);

		parent = dget_parent(entry);
		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
				 &entry->d_name, &outarg);
		fuse_request_send(fc, req);
		dput(parent);
		err = req->out.h.error;
		fuse_put_request(fc, req);
		/* Zero nodeid is same as -ENOENT */
		if (!err && !outarg.nodeid)
			err = -ENOENT;
		if (!err) {
			struct fuse_inode *fi = get_fuse_inode(inode);
			if (outarg.nodeid != get_node_id(inode)) {
				/* Lookup found a different inode: tell the
				 * server to forget it and invalidate. */
				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
				goto invalid;
			}
			spin_lock(&fc->lock);
			fi->nlookup++;
			spin_unlock(&fc->lock);
		}
		kfree(forget);
		/* A changed file type also invalidates the dentry. */
		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
			goto invalid;

		fuse_change_attributes(inode, &outarg.attr,
				       entry_attr_timeout(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	} else if (inode) {
		/* Still within the validity period: optionally hint the
		 * parent to use readdirplus. */
		fc = get_fuse_conn(inode);
		if (fc->readdirplus_auto) {
			parent = dget_parent(entry);
			fuse_advise_use_readdirplus(parent->d_inode);
			dput(parent);
		}
	}
	ret = 1;
out:
	return ret;

invalid:
	ret = 0;
	if (check_submounts_and_drop(entry) != 0)
		ret = 1;
	goto out;
}
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	unsigned int boosted_freq;
	struct cpufreq_interactive_cpuinfo *picpu;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	pcpu->last_evaluated_jiffy = get_jiffies_64();
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	pcpu->prev_load = cpu_load;
	boosted = boost_val || now < boostpulse_endtime;
	boosted_freq = max(hispeed_freq, pcpu->policy->min);

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < boosted_freq) {
			new_freq = boosted_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq > freq_calc_thresh)
				new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < boosted_freq)
				new_freq = boosted_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (new_freq > freq_calc_thresh)
			new_freq = pcpu->policy->max * cpu_load / 100;

		if (sync_freq && new_freq < sync_freq) {

			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}

			if (max_freq > up_threshold_any_cpu_freq ||
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}
Exemple #30
0
/*
 * Check whether the dentry is still valid
 *
 * If the entry validity timeout has expired and the dentry is
 * positive, try to redo the lookup.  If the lookup results in a
 * different inode, then let the VFS invalidate the dentry and redo
 * the lookup once more.  If the lookup results in the same inode,
 * then refresh the attributes, timeouts and mark the dentry valid.
 *
 * Returns 1 when the dentry is still valid, 0 otherwise.
 */
static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
{
	struct inode *inode = entry->d_inode;

	if (inode && is_bad_inode(inode))
		return 0;
	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
		/* Validity timeout expired: redo the lookup. */
		int err;
		struct fuse_entry_out outarg;
		struct fuse_conn *fc;
		struct fuse_req *req;
		struct fuse_req *forget_req;
		struct dentry *parent;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			return 0;

		fc = get_fuse_conn(inode);
		req = fuse_get_req(fc);
		if (IS_ERR(req))
			return 0;

		/* Reserve the forget request up front so the forget can
		 * always be sent if the lookup finds a different inode. */
		forget_req = fuse_get_req(fc);
		if (IS_ERR(forget_req)) {
			fuse_put_request(fc, req);
			return 0;
		}

		attr_version = fuse_get_attr_version(fc);

		parent = dget_parent(entry);
		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
				 &entry->d_name, &outarg);
		fuse_request_send(fc, req);
		dput(parent);
		err = req->out.h.error;
		fuse_put_request(fc, req);
		/* Zero nodeid is same as -ENOENT */
		if (!err && !outarg.nodeid)
			err = -ENOENT;
		if (!err) {
			struct fuse_inode *fi = get_fuse_inode(inode);
			if (outarg.nodeid != get_node_id(inode)) {
				/* Different inode: forget it and let the
				 * VFS invalidate this dentry. */
				fuse_send_forget(fc, forget_req,
						 outarg.nodeid, 1);
				return 0;
			}
			spin_lock(&fc->lock);
			fi->nlookup++;
			spin_unlock(&fc->lock);
		}
		fuse_put_request(fc, forget_req);
		/* A changed file type also invalidates the dentry. */
		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
			return 0;

		fuse_change_attributes(inode, &outarg.attr,
				       entry_attr_timeout(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	}
	return 1;
}