Example #1
static void report_timing(void)
{
	unsigned long since = jiffies - timing_stats.last_report_time;

	/* If it's been more than one second... */
	if (since >= HZ) {
		int first = (timing_stats.last_report_time == 0);

		timing_stats.last_report_time = jiffies;
		if (!first)
#ifdef CONFIG_DEBUG_PRINTK
			printk(KERN_INFO IPWIRELESS_PCCARD_NAME
			       ": %u us elapsed - read %lu bytes in %u us, wrote %lu bytes in %u us\n",
			       jiffies_to_usecs(since),
			       timing_stats.read_bytes,
			       jiffies_to_usecs(timing_stats.read_time),
			       timing_stats.write_bytes,
			       jiffies_to_usecs(timing_stats.write_time));
#else
			;
#endif

		timing_stats.read_time = 0;
		timing_stats.write_time = 0;
		timing_stats.read_bytes = 0;
		timing_stats.write_bytes = 0;
	}
}
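For reference, a minimal sketch of the timing_stats object the function above manipulates, inferred from its field accesses; the actual ipwireless definition may differ.

static struct {
	unsigned long last_report_time;	/* jiffies at last report */
	unsigned long read_time;	/* jiffies spent in reads */
	unsigned long write_time;	/* jiffies spent in writes */
	unsigned long read_bytes;
	unsigned long write_bytes;
} timing_stats;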
Example #2
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @pgdat has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary; otherwise the function does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
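A hedged usage sketch: a reclaim path might throttle itself on a congested node as below. Everything except wait_iff_congested() and BLK_RW_ASYNC is illustrative, not taken from the source above.

/* Illustrative caller; sleeps up to 100 ms when pgdat looks congested. */
static void throttle_on_congestion(struct pglist_data *pgdat)
{
	long remaining;

	remaining = wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ / 10);
	if (remaining == HZ / 10)
		pr_debug("no congestion, did not sleep\n");
}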
Example #3
static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tpm_chip *chip = to_tpm_chip(dev);

	return sprintf(buf, "%d %d %d %d [%s]\n",
		       jiffies_to_usecs(chip->timeout_a),
		       jiffies_to_usecs(chip->timeout_b),
		       jiffies_to_usecs(chip->timeout_c),
		       jiffies_to_usecs(chip->timeout_d),
		       chip->timeout_adjusted
		       ? "adjusted" : "original");
}
Example #4
static ssize_t durations_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tpm_chip *chip = to_tpm_chip(dev);

	if (chip->duration[TPM_LONG] == 0)
		return 0;

	return sprintf(buf, "%d %d %d [%s]\n",
		       jiffies_to_usecs(chip->duration[TPM_SHORT]),
		       jiffies_to_usecs(chip->duration[TPM_MEDIUM]),
		       jiffies_to_usecs(chip->duration[TPM_LONG]),
		       chip->duration_adjusted
		       ? "adjusted" : "original");
}
Example #5
File: sch_fq.c Project: AK101111/linux
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}
Example #6
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
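A hedged usage sketch of the typical retry-loop shape around congestion_wait(); the allocation and loop are illustrative, only congestion_wait() itself comes from the code above.

/* Illustrative retry loop: back off up to 20 ms per failed attempt. */
static struct page *alloc_page_patiently(void)
{
	struct page *page;

	while (!(page = alloc_page(GFP_NOFS)))
		congestion_wait(BLK_RW_ASYNC, HZ / 50);

	return page;
}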
Example #7
File: sch_pie.c Project: giwa/fq-pie
static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32) PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;

}
Example #8
/* Clear any keys in the buffer */
static void cros_ec_keyb_clear_keyboard(struct cros_ec_keyb *ckdev)
{
	uint8_t old_state[ckdev->cols];
	uint8_t new_state[ckdev->cols];
	unsigned long duration;
	int i, ret;

	/*
	 * Keep reading until we see that the scan state does not change.
	 * That indicates that we are done.
	 *
	 * Assume that the EC keyscan buffer is at most 32 deep.
	 */
	duration = jiffies;
	ret = cros_ec_keyb_get_state(ckdev, new_state);
	for (i = 1; !ret && i < 32; i++) {
		memcpy(old_state, new_state, sizeof(old_state));
		ret = cros_ec_keyb_get_state(ckdev, new_state);
		if (0 == memcmp(old_state, new_state, sizeof(old_state)))
			break;
	}
	duration = jiffies - duration;
	dev_info(ckdev->dev, "Discarded %d keyscan(s) in %dus\n", i,
		jiffies_to_usecs(duration));
}
Example #9
static int cs_init(struct dbs_data *dbs_data, bool notify)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->freq_step = DEF_FREQUENCY_STEP;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);

	if (notify)
		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}
Example #10
static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status)
{
	u32 total = 0;
	int i;

	/* read the FW status from the device */
	wl1271_spi_mem_read(wl, STATUS_MEM_ADDRESS, status, sizeof(*status));

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	/* update the number of TX blocks freed by the firmware */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		u32 cnt = status->tx_released_blks[i] - wl->tx_blocks_freed[i];
		wl->tx_blocks_freed[i] = status->tx_released_blks[i];
		wl->tx_blocks_available += cnt;
		total += cnt;
	}

	/* if blocks were freed and frames are queued, schedule TX work */
	if (total && !skb_queue_empty(&wl->tx_queue))
		schedule_work(&wl->tx_work);

	/* update the host/firmware time offset (both in microseconds) */
	wl->time_offset = jiffies_to_usecs(jiffies) - status->fw_localtime;
}
Example #11
static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			      u32 extra, struct ieee80211_tx_info *control)
{
	struct wl1271_tx_hw_descr *desc;
	int pad;

	desc = (struct wl1271_tx_hw_descr *) skb->data;

	/* configure packet life time */
	desc->start_time = jiffies_to_usecs(jiffies) - wl->time_offset;
	desc->life_time = TX_HW_MGMT_PKT_LIFETIME_TU;

	/* configure the tx attributes */
	desc->tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
	/* FIXME: do we know the packet priority? can we identify mgmt
	   packets, and use max prio for them at least? */
	desc->tid = 0;
	desc->aid = TX_HW_DEFAULT_AID;
	desc->reserved = 0;

	/* align the length (and store in terms of words) */
	pad = WL1271_TX_ALIGN(skb->len);
	desc->length = pad >> 2;

	/* calculate number of padding bytes */
	pad = pad - skb->len;
	desc->tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;

	wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
	return 0;
}
Example #12
/* Extract info for Tcp socket info provided via netlink. */
static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
				union tcp_cc_info *info)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt	= 0;
		info->vegas.tcpv_rtt	= jiffies_to_usecs(ca->rtt);
		info->vegas.tcpv_minrtt	= jiffies_to_usecs(ca->rtt_min);

		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}
Example #13
static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	int cpu = get_cpu();

	mutex_init(&od_dbs_data.mutex);
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
Example #14
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @zone has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, a short sleep or a cond_resched is
 * performed to yield the processor and allow other subsystems to make
 * forward progress.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(ZONE_CONGESTED, &zone->flags)) {

		/*
		 * Memory allocation/reclaim might be called from a WQ
		 * context and the current implementation of the WQ
		 * concurrency control doesn't recognize that a particular
		 * WQ is congested if the worker thread is looping without
		 * ever sleeping. Therefore we have to do a short sleep
		 * here rather than calling cond_resched().
		 */
		if (current->flags & PF_WQ_WORKER)
			schedule_timeout_uninterruptible(1);
		else
			cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
Example #15
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);
	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct rtattr *rta;
		struct tcpvegas_info *info;

		rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
		info = RTA_DATA(rta);
		info->tcpv_enabled = 1;
		info->tcpv_rttcnt = 0;
		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
	rtattr_failure:	;
	}
}
Example #16
File: sched_edf.c Project: Aand1/ROSCH
static void edf_start_account(resch_task_t *rt)
{
#ifdef NO_LINUX_LOAD_BALANCE
	setup_timer_on_stack(&rt->expire_timer, expire_handler, (unsigned long)rt);
	mod_timer(&rt->expire_timer, jiffies + rt->budget - exec_time(rt));	
#else
	rt->task->rt.timeout = 0;
	rt->task->signal->rlim[RLIMIT_RTTIME].rlim_cur = 
		jiffies_to_usecs(rt->budget - exec_time(rt));
#endif
}
Example #17
static void bcm_timer_compute_params(bcm_timer_t *t, unsigned long period_in_usecs)
{
   unsigned long real_period;

   bcm_pr_debug("bcm_timer_compute_params(period_in_usecs=%lu)\n", (unsigned long)(period_in_usecs));

   t->period_in_jiffies = usecs_to_jiffies(period_in_usecs);
   real_period = jiffies_to_usecs(t->period_in_jiffies);
   if (real_period < period_in_usecs) {
      t->period_in_jiffies += 1;
      real_period = jiffies_to_usecs(t->period_in_jiffies);
      bcm_assert(real_period >= period_in_usecs);
   }
   t->drift_increment = real_period - period_in_usecs;
   bcm_assert((0 == t->drift_increment) || ((t->drift_increment > 0) && (t->period_in_jiffies >= 1)));

   bcm_pr_debug("one_jiffie_to_usecs = %lu, period_in_jiffies = %lu, real_period = %lu, drift_increment = %lu\n",
      (unsigned long)(one_jiffie_to_usecs), (unsigned long)(t->period_in_jiffies),
      (unsigned long)(real_period), (unsigned long)(t->drift_increment));
}
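To make the rounding concrete, a worked case (an illustration only, assuming HZ = 100 so that one jiffy is 10000 us):

/*
 * period_in_usecs          = 25000
 * usecs_to_jiffies(25000) -> 3      (2.5 jiffies, rounded up)
 * jiffies_to_usecs(3)     -> 30000  (>= 25000, so the +1 branch is
 *                                    skipped; it only guards against
 *                                    a usecs_to_jiffies that rounds
 *                                    down)
 * drift_increment          = 30000 - 25000 = 5000 us per period,
 *                            presumably accumulated by the timer
 *                            handler to re-align the period.
 */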
Example #18
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}
Example #19
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);
	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rtt = jiffies_to_usecs(ca->rtt),
			.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
		};

		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}


static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};

static int __init tcp_westwood_register(void)
{
	BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}
Example #20
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}
Example #21
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	s64 tmp;
	struct timespec ts;
	unsigned long t1, t2, t3;
	unsigned long flags;

	/* Though tsk->delays is accessed later, exiting early avoids
	 * needlessly filling in the other data
	 */
	if (!tsk->delays)
		goto done;

	tmp = (s64)d->cpu_run_real_total;
	cputime_to_timespec(tsk->utime + tsk->stime, &ts);
	tmp += timespec_to_ns(&ts);
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcnt;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->sched_info.cpu_time;

	d->cpu_count += t1;

	jiffies_to_timespec(t2, &ts);
	tmp = (s64)d->cpu_delay_total + timespec_to_ns(&ts);
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + (s64)jiffies_to_usecs(t3) * 1000;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ?	0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	spin_unlock_irqrestore(&tsk->delays->lock, flags);

done:
	return 0;
}
Example #22
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
#ifdef CONFIG_ARCH_HI6XXX
        tuners->od_6xxx_up_threshold = HI6XXX_FREQUENCY_UP_THRESHOLD;
        tuners->od_6xxx_down_threshold = HI6XXX_FREQUENCY_DOWN_THRESHOLD;
#endif
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
			DEF_FREQUENCY_DOWN_DIFFERENTIAL;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	mutex_init(&dbs_data->mutex);
	return 0;
}
Example #23
static void report_timing(void)
{
	unsigned long since = jiffies - timing_stats.last_report_time;

	/* If it's been more than one second... */
	if (since >= HZ) {
		int first = (timing_stats.last_report_time == 0);

		timing_stats.last_report_time = jiffies;
		if (!first)
			printk(KERN_INFO IPWIRELESS_PCCARD_NAME
			       ": %u us elapsed - read %lu bytes in %u us, wrote %lu bytes in %u us\n",
			       jiffies_to_usecs(since),
			       timing_stats.read_bytes,
			       jiffies_to_usecs(timing_stats.read_time),
			       timing_stats.write_bytes,
			       jiffies_to_usecs(timing_stats.write_time));

		timing_stats.read_time = 0;
		timing_stats.write_time = 0;
		timing_stats.read_bytes = 0;
		timing_stats.write_bytes = 0;
	}
}
Example #24
static inline void cpufreq_greenmax_calc_load(int j)
{
	struct greenmax_info_s *j_this_greenmax;
	u64 cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load;

	j_this_greenmax = &per_cpu(greenmax_info, j);

	cur_idle_time = get_cpu_idle_time_greenmax(j, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

	wall_time = cur_wall_time - j_this_greenmax->prev_cpu_wall;
	j_this_greenmax->prev_cpu_wall = cur_wall_time;

	idle_time = cur_idle_time - j_this_greenmax->prev_cpu_idle;
	j_this_greenmax->prev_cpu_idle = cur_idle_time;

	iowait_time = cur_iowait_time - j_this_greenmax->prev_cpu_iowait;
	j_this_greenmax->prev_cpu_iowait = cur_iowait_time;

	if (ignore_nice) {
		u64 cur_nice;
		unsigned long cur_nice_jiffies;

		cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - j_this_greenmax->prev_cpu_nice;
		cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice);

		j_this_greenmax->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		idle_time += jiffies_to_usecs(cur_nice_jiffies);
	}

	/*
	 * For the purpose of ondemand, waiting for disk IO is an
	 * indication that you're performance critical, and not that
	 * the system is actually idle. So subtract the iowait time
	 * from the cpu idle time.
	 */
	if (io_is_busy && idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return;

	cur_load = 100 * (wall_time - idle_time) / wall_time;
	j_this_greenmax->cur_cpu_load = cur_load;
}
Example #25
/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. gran must be smaller than a second.
 * Always rounds down.
 *
 * This function should be only used for timestamps returned by
 * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
 * it doesn't handle the better resolution of the later.
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
	/*
	 * Division is pretty slow so avoid it for common cases.
	 * Currently current_kernel_time() never returns better than
	 * jiffies resolution. Exploit that.
	 */
	if (gran <= jiffies_to_usecs(1) * 1000) {
		/* nothing */
	} else if (gran == 1000000000) {
		t.tv_nsec = 0;
	} else {
		t.tv_nsec -= t.tv_nsec % gran;
	}
	return t;
}
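A worked case (illustrative, assuming HZ = 250, so jiffies_to_usecs(1) * 1000 == 4000000 ns):

/*
 * gran = 1000000 (1 ms):  1000000 <= 4000000, so t is returned
 *                         unchanged; jiffies-based timestamps are
 *                         already coarser than 1 ms.
 * gran = 1000000000:      tv_nsec is zeroed (whole seconds).
 * gran = 250000000:       tv_nsec -= tv_nsec % 250000000.
 */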
Example #26
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 &&
		    npinfo->poll_owner != smp_processor_id()) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
Example #27
File: system.c Project: twobob/KK_kernel
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
	cputime64_t idle_time;
	cputime64_t cur_jiffies;
	cputime64_t busy_time;

	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);

	busy_time = cputime64_add(busy_time,
			kstat_cpu(cpu).cpustat.nice);
	idle_time = cputime64_sub(cur_jiffies, busy_time);
	return jiffies_to_usecs(idle_time);
}
Example #28
int __init bcm_timer_init(bcm_timer_t *t, void (*callback)(bcm_timer_t *t))
{
   int ret = 0;

   bcm_pr_debug("bcm_timer_init()\n");

   bcm_assert(NULL != callback);

   one_jiffie_to_usecs = jiffies_to_usecs(1);

   t->callback = callback;
   t->period_in_jiffies = msecs_to_jiffies(1000);

   // Init kernel timer
   init_timer(&(t->kobject));
   t->kobject.function = bcm_timer_fn;
   t->kobject.data = (unsigned long)(t);

   return (ret);
}
Example #29
static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
	tuners->freq_step = DEF_FREQUENCY_STEP;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);
	mutex_init(&dbs_data->mutex);
	return 0;
}
Example #30
static unsigned int calc_cur_load(unsigned int cpu)
{
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	u64 cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	if (ignore_nice) {
		u64 cur_nice;
		unsigned long cur_nice_jiffies;

		cur_nice = kcpustat_cpu(cpu).cpustat[CPUTIME_NICE] - pcpu->prev_cpu_nice;
		cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice);

		pcpu->prev_cpu_nice = kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

		idle_time += jiffies_to_usecs(cur_nice_jiffies);
	}

	if (io_is_busy && idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	return 100 * (wall_time - idle_time) / wall_time;
}