Example #1
0
/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}
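The function above pairs a lockless pre-check with a recheck under xtime_lock, and the idle-tick code later in this file reads that same seqlock. A minimal reader-side sketch, assuming only a consistent snapshot of last_jiffies_update is needed (the helper name is hypothetical):

static ktime_t snapshot_last_jiffies_update(void)
{
	ktime_t snap;
	unsigned long seq;

	/* Retry until no writer raced with us. */
	do {
		seq = read_seqbegin(&xtime_lock);
		snap = last_jiffies_update;
	} while (read_seqretry(&xtime_lock, seq));

	return snap;
}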
/**
 * pch_i2c_wait_for_bus_idle() - check the status of bus.
 * @adap:	Pointer to struct i2c_algo_pch_data.
 * @timeout:	waiting time counter (ms).
 */
static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
				     s32 timeout)
{
	void __iomem *p = adap->pch_base_address;
	ktime_t ns_val;

	if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
		return 0;

	/* MAX timeout value is timeout*1000*1000nsec */
	ns_val = ktime_add_ns(ktime_get(), timeout*1000*1000);
	do {
		msleep(20);
		if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
			return 0;
	} while (ktime_lt(ktime_get(), ns_val));

	pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR));
	pch_err(adap, "%s: Timeout Error.return%d\n", __func__, -ETIME);
	pch_i2c_init(adap);

	return -ETIME;
}
Example #3
0
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);
	/*
	 * Make sure, that the result did not wrap with a very large
	 * interval.
	 */
	if (timer->expires.tv64 < 0)
		timer->expires = ktime_set(KTIME_SEC_MAX, 0);

	return orun;
}
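hrtimer_forward() is typically called from a timer callback to implement a periodic timer without accumulating drift. A minimal callback sketch, assuming a hypothetical my_period interval set up elsewhere:

static ktime_t my_period;	/* hypothetical period, e.g. ktime_set(0, 10 * NSEC_PER_MSEC) */

static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
{
	/* Push the expiry forward by whole periods past "now". */
	hrtimer_forward(timer, timer->base->get_time(), my_period);
	return HRTIMER_RESTART;
}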
enum hrtimer_restart hrtimer_auto_trigger_func(struct hrtimer *p_timer)
{
	/* printk("The function SMI hrtimer_auto_trigger_func, times is %lu\n", hrtimer_trigger_times); */

	/*
	 * Before Linux 2.6.3:
	 * hrtimer_auto_trigger.expires = ktime_add_ns(hrtimer_auto_trigger.expires, (u64)(hrtimer_smi_time_interval*1000000));
	 */
	hrtimer_set_expires(&hrtimer_auto_trigger,
			    ktime_add_ns(hrtimer_get_expires(&hrtimer_auto_trigger),
					 (u64)(hrtimer_smi_time_interval * 1000000)));

	/* Get SMI data */
	SMI_Manual_Trigger_Result(hrtimer_cfg, hrtimer_cfg_ex, &hrtimer_result);
	auto_trigger_result[hrtimer_trigger_times] = hrtimer_result;

	/* Update counter */
	hrtimer_trigger_times++;

	if (hrtimer_trigger_times >= hrtimer_smi_time_count)
		return HRTIMER_NORESTART;
	else
		return HRTIMER_RESTART;
}
Example #5
0
int journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int err;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(transaction->t_updates > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this transaction.
	 * Keep doing that while new threads continue to arrive.
	 * It doesn't cost much - we're about to run a commit and sleep
	 * on IO anyway.  Speeds up many-threaded, many-dir operations
	 * by 30x or more...
	 *
	 * We try and optimize the sleep time against what the underlying disk
	 * can do, instead of having a static sleep time.  This is useful for
	 * the case where our storage is so fast that it is more optimal to go
	 * ahead and force a flush and wait for the transaction to be committed
	 * than it is to wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how long it takes
	 * to commit a transaction, and compare it with how long this
	 * transaction has been running, and if run time < commit time then we
	 * sleep for the delta and commit.  This greatly helps super fast disks
	 * that would see slowdowns as more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one to
	 * perform a synchronous write.  We do this to detect the case where a
	 * single process is doing a stream of sync writes.  No point in waiting
	 * for joiners in that case.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		spin_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		spin_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = min_t(u64, commit_time,
				    1000*jiffies_to_usecs(1));

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;
	if (!transaction->t_updates) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
			transaction->t_outstanding_credits >
				journal->j_max_transaction_buffers ||
			time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */
		tid_t tid = transaction->t_tid;

		spin_unlock(&transaction->t_handle_lock);
		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		__log_start_commit(journal, transaction->t_tid);
		spin_unlock(&journal->j_state_lock);

		/*
		 * Special case: JFS_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			err = log_wait_commit(journal, tid);
	} else {
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
	}

	lock_map_release(&handle->h_lockdep_map);

	jbd_free_handle(handle);
	return err;
}
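The batching logic above can be read in isolation: if the handle ran for less time than an average commit takes, sleep for roughly one commit time so other writers can join the transaction. A minimal standalone sketch, assuming a hypothetical helper that is passed the start time and the average commit time in nanoseconds:

static void wait_for_joiners(ktime_t start, u64 avg_commit_ns)
{
	u64 ran_ns = ktime_to_ns(ktime_sub(ktime_get(), start));

	if (ran_ns < avg_commit_ns) {
		ktime_t expires = ktime_add_ns(ktime_get(), avg_commit_ns);

		/* Sleep until the absolute deadline, letting joiners pile on. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
	}
}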
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	unsigned long rcu_delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	
	if ((long)delta_jiffies >= 1) {

		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		
		ts->idle_expires = expires;

		 if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
}
Example #7
0
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(void)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now, delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	cpu = smp_processor_id();
	if (unlikely(local_softirq_pending()))
		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
		       local_softirq_pending());

	now = ktime_get();
	/*
	 * When called from irq_exit we need to account the idle sleep time
	 * correctly.
	 */
	if (ts->tick_stopped) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}

	ts->idle_entrytime = now;
	ts->idle_calls++;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&xtime_lock, seq));

	/* Get the next timer wheel timer */
	next_jiffies = get_next_timer_interrupt(last_jiffies);
	delta_jiffies = next_jiffies - last_jiffies;

	if (rcu_needs_cpu(cpu))
		delta_jiffies = 1;
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		if (delta_jiffies > 1)
			cpu_set(cpu, nohz_cpu_mask);
		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			ts->idle_tick = ts->sched_timer.expires;
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = -1;

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer
		 */
		expires = ktime_add_ns(last_update, tick_period.tv64 *
				       delta_jiffies);
		ts->idle_expires = expires;
		ts->idle_sleeps++;

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpu_clear(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
end:
	local_irq_restore(flags);
}
Example #8
0
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferement value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogram of event if its not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);
			calc_load_enter_idle();

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		 if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
}
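The expiry calculation above boils down to "sleep no longer than the timekeeping code can defer, and no longer than the next timer wheel event". A minimal sketch of that step, assuming delta_jiffies is already known to be below NEXT_TIMER_MAX_DELTA; the helper name is hypothetical:

static ktime_t next_tick_expiry(ktime_t last_update, u64 max_deferment_ns,
				unsigned long delta_jiffies)
{
	/* Bound the sleep by both the clocksource limit and the next timer. */
	u64 sleep_ns = min_t(u64, max_deferment_ns,
			     tick_period.tv64 * delta_jiffies);

	return ktime_add_ns(last_update, sleep_ns);
}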
Example #9
0
void hrtimer_interrupt(struct clock_event_device *dev)
{
    struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
    ktime_t expires_next, now, entry_time, delta;
    int i, retries = 0;

    BUG_ON(!cpu_base->hres_active);
    cpu_base->nr_events++;
    dev->next_event.tv64 = KTIME_MAX;

    raw_spin_lock(&cpu_base->lock);
    entry_time = now = hrtimer_update_base(cpu_base);
retry:
    expires_next.tv64 = KTIME_MAX;
    cpu_base->expires_next.tv64 = KTIME_MAX;

    for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
        struct hrtimer_clock_base *base;
        struct timerqueue_node *node;
        ktime_t basenow;

        if (!(cpu_base->active_bases & (1 << i)))
            continue;

        base = cpu_base->clock_base + i;
        basenow = ktime_add(now, base->offset);

        while ((node = timerqueue_getnext(&base->active))) {
            struct hrtimer *timer;

            timer = container_of(node, struct hrtimer, node);


            if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
                ktime_t expires;

                expires = ktime_sub(hrtimer_get_expires(timer),
                                    base->offset);
                if (expires.tv64 < expires_next.tv64)
                    expires_next = expires;
                break;
            }

            __run_hrtimer(timer, &basenow);
        }
    }

    cpu_base->expires_next = expires_next;
    raw_spin_unlock(&cpu_base->lock);


    if (expires_next.tv64 == KTIME_MAX ||
            !tick_program_event(expires_next, 0)) {
        cpu_base->hang_detected = 0;
        return;
    }

    raw_spin_lock(&cpu_base->lock);
    now = hrtimer_update_base(cpu_base);
    cpu_base->nr_retries++;
    if (++retries < 3)
        goto retry;
    cpu_base->nr_hangs++;
    cpu_base->hang_detected = 1;
    raw_spin_unlock(&cpu_base->lock);
    delta = ktime_sub(now, entry_time);
    if (delta.tv64 > cpu_base->max_hang_time.tv64)
        cpu_base->max_hang_time = delta;
    if (delta.tv64 > 100 * NSEC_PER_MSEC)
        expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
    else
        expires_next = ktime_add(now, delta);
    tick_program_event(expires_next, 1);
    printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
                ktime_to_ns(delta));
}
Example #10
0
/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	timer->armed = true;
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}
tEplKernel PUBLIC EplTimerHighReskModifyTimerNs(tEplTimerHdl * pTimerHdl_p,
						unsigned long long ullTimeNs_p,
						tEplTimerkCallback
						pfnCallback_p,
						unsigned long ulArgument_p,
						BOOL fContinuously_p)
{
	tEplKernel Ret;
	unsigned int uiIndex;
	tEplTimerHighReskTimerInfo *pTimerInfo;
	ktime_t RelTime;

	Ret = kEplSuccessful;

	// check pointer to handle
	if (pTimerHdl_p == NULL) {
		Ret = kEplTimerInvalidHandle;
		goto Exit;
	}

	if (*pTimerHdl_p == 0) {	// no timer created yet

		// search free timer info structure
		pTimerInfo = &EplTimerHighReskInstance_l.m_aTimerInfo[0];
		for (uiIndex = 0; uiIndex < TIMER_COUNT;
		     uiIndex++, pTimerInfo++) {
			if (pTimerInfo->m_EventArg.m_TimerHdl == 0) {	// free structure found
				break;
			}
		}
		if (uiIndex >= TIMER_COUNT) {	// no free structure found
			Ret = kEplTimerNoTimerCreated;
			goto Exit;
		}

		pTimerInfo->m_EventArg.m_TimerHdl = HDL_INIT(uiIndex);
	} else {
		uiIndex = HDL_TO_IDX(*pTimerHdl_p);
		if (uiIndex >= TIMER_COUNT) {	// invalid handle
			Ret = kEplTimerInvalidHandle;
			goto Exit;
		}

		pTimerInfo = &EplTimerHighReskInstance_l.m_aTimerInfo[uiIndex];
	}

	/*
	 * increment timer handle
	 * (if timer expires right after this statement, the user
	 * would detect an unknown timer handle and discard it)
	 */
	pTimerInfo->m_EventArg.m_TimerHdl =
	    HDL_INC(pTimerInfo->m_EventArg.m_TimerHdl);
	*pTimerHdl_p = pTimerInfo->m_EventArg.m_TimerHdl;

	// reject too small time values
	if ((fContinuously_p && (ullTimeNs_p < TIMER_MIN_VAL_CYCLE))
	    || (!fContinuously_p && (ullTimeNs_p < TIMER_MIN_VAL_SINGLE))) {
		Ret = kEplTimerNoTimerCreated;
		goto Exit;
	}

	pTimerInfo->m_EventArg.m_ulArg = ulArgument_p;
	pTimerInfo->m_pfnCallback = pfnCallback_p;
	pTimerInfo->m_fContinuously = fContinuously_p;
	pTimerInfo->m_ullPeriod = ullTimeNs_p;

	/*
	 * HRTIMER_MODE_REL does not influence general handling of this timer.
	 * It only sets relative mode for this start operation.
	 * -> Expire time is calculated by: Now + RelTime
	 * hrtimer_start also skips pending timer events.
	 * The state HRTIMER_STATE_CALLBACK is ignored.
	 * We have to cope with that in our callback function.
	 */
	RelTime = ktime_add_ns(ktime_set(0, 0), ullTimeNs_p);
	hrtimer_start(&pTimerInfo->m_Timer, RelTime, HRTIMER_MODE_REL);

      Exit:
	return Ret;

}
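The comment above hinges on the difference between the two start modes: HRTIMER_MODE_REL arms the timer at now + delta, while HRTIMER_MODE_ABS takes the expiry time directly. A minimal sketch contrasting the two, with hypothetical helper names:

static void start_after_500us(struct hrtimer *t)
{
	/* Relative mode: expiry is computed as now + 500 us. */
	hrtimer_start(t, ktime_set(0, 500 * NSEC_PER_USEC), HRTIMER_MODE_REL);
}

static void start_at(struct hrtimer *t, ktime_t abs_expiry)
{
	/* Absolute mode: expiry is used as-is. */
	hrtimer_start(t, abs_expiry, HRTIMER_MODE_ABS);
}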
Example #12
0
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	__hrtimer_run_queues(cpu_base, now);

	/* Reevaluate the clock bases for the next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta.tv64;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}
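When a hang is detected, the handler above pushes the next event out by the time it already spent in the interrupt, but never by more than 100 ms. A minimal sketch of just that clamping step, with a hypothetical helper name:

static ktime_t clamp_hang_delay(ktime_t now, ktime_t delta)
{
	/* Give the CPU some breathing room, capped at 100 ms. */
	if (ktime_to_ns(delta) > 100 * NSEC_PER_MSEC)
		return ktime_add_ns(now, 100 * NSEC_PER_MSEC);

	return ktime_add(now, delta);
}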
Example #13
0
File: lib.c Project: 020gzh/linux
static void midi_port_work(struct work_struct *work)
{
	struct snd_fw_async_midi_port *port =
			container_of(work, struct snd_fw_async_midi_port, work);
	struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
	int generation;
	int type;

	/* Under transacting or error state. */
	if (!port->idling || port->error)
		return;

	/* Nothing to do. */
	if (substream == NULL || snd_rawmidi_transmit_empty(substream))
		return;

	/* Do it in next chance. */
	if (ktime_after(port->next_ktime, ktime_get())) {
		schedule_work(&port->work);
		return;
	}

	/*
	 * Fill the buffer. The callee must use snd_rawmidi_transmit_peek().
	 * Later, snd_rawmidi_transmit_ack() is called.
	 */
	memset(port->buf, 0, port->len);
	port->consume_bytes = port->fill(substream, port->buf);
	if (port->consume_bytes <= 0) {
		/* Do it in next chance, immediately. */
		if (port->consume_bytes == 0) {
			port->next_ktime = ktime_set(0, 0);
			schedule_work(&port->work);
		} else {
			/* Fatal error. */
			port->error = true;
		}
		return;
	}

	/* Calculate type of transaction. */
	if (port->len == 4)
		type = TCODE_WRITE_QUADLET_REQUEST;
	else
		type = TCODE_WRITE_BLOCK_REQUEST;

	/* Set interval to next transaction. */
	port->next_ktime = ktime_add_ns(ktime_get(),
				port->consume_bytes * 8 * NSEC_PER_SEC / 31250);

	/* Start this transaction. */
	port->idling = false;

	/*
	 * In Linux FireWire core, when generation is updated with a memory
	 * barrier, the node id has already been updated. In this module, after
	 * this smp_rmb(), load/store instructions to memory are completed.
	 * Thus, both generation and node id are available with recent
	 * values. This is a light-serialization solution to handle bus reset
	 * events on IEEE 1394 bus.
	 */
	generation = port->parent->generation;
	smp_rmb();

	fw_send_request(port->parent->card, &port->transaction, type,
			port->parent->node_id, generation,
			port->parent->max_speed, port->addr,
			port->buf, port->len, async_midi_port_callback,
			port);
}
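The pacing above charges 8 bit-times per byte at the 31250 baud MIDI rate before the port is allowed to transmit again. A minimal sketch of that deadline computation, with a hypothetical helper name:

static ktime_t midi_tx_deadline(int bytes)
{
	/* bytes * 8 bits at 31250 bit/s, expressed in nanoseconds. */
	return ktime_add_ns(ktime_get(),
			    (u64)bytes * 8 * NSEC_PER_SEC / 31250);
}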
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
    static DEFINE_SPINLOCK(lowmem_lock);
    struct task_struct *tsk;
    struct task_struct *selected = NULL;
    int rem = 0;
    static int same_count;
    static int busy_count;
    static int busy_count_dropped;
    static int oldpid;
    static int lastpid;
    static ktime_t next_busy_print;
    int tasksize;
    int i;
    int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
    int selected_tasksize = 0;
    int selected_oom_score_adj;
    int array_size = ARRAY_SIZE(lowmem_adj);
    int other_free;
    int other_file;
    unsigned long nr_to_scan = sc->nr_to_scan;

    if (nr_to_scan > 0) {
        if (mutex_lock_interruptible(&scan_mutex) < 0)
            return 0;
    }

    other_free = global_page_state(NR_FREE_PAGES);
    other_file = global_page_state(NR_FILE_PAGES) -
                 global_page_state(NR_FILE_MAPPED);

    tune_lmk_param(&other_free, &other_file, sc);

    if (lowmem_adj_size < array_size)
        array_size = lowmem_adj_size;
    if (lowmem_minfree_size < array_size)
        array_size = lowmem_minfree_size;
    for (i = 0; i < array_size; i++) {
        if (other_free < lowmem_minfree[i] &&
                other_file < lowmem_minfree[i]) {
            min_score_adj = lowmem_adj[i];
            break;
        }
    }

    if (nr_to_scan > 0)
        lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
                     nr_to_scan, sc->gfp_mask, other_free,
                     other_file, min_score_adj);
    rem = global_page_state(NR_ACTIVE_ANON) +
          global_page_state(NR_ACTIVE_FILE) +
          global_page_state(NR_INACTIVE_ANON) +
          global_page_state(NR_INACTIVE_FILE);
    if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
        lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                     nr_to_scan, sc->gfp_mask, rem);

        if (nr_to_scan > 0)
            mutex_unlock(&scan_mutex);

        return rem;
    }
    selected_oom_score_adj = min_score_adj;

    if (spin_trylock(&lowmem_lock) == 0) {
        if (ktime_us_delta(ktime_get(), next_busy_print) > 0) {
            lowmem_print(2, "Lowmemkiller busy %d %d %d\n",
                         busy_count, busy_count_dropped,
                         oom_killer_disabled);
            next_busy_print = ktime_add(ktime_get(),
                                        ktime_set(5, 0));
            busy_count_dropped = 0;
        }
        busy_count++;
        busy_count_dropped++;
        mutex_unlock(&scan_mutex);
        return LMK_BUSY;
    }

    rcu_read_lock();
    for_each_process(tsk) {
        struct task_struct *p;
        int oom_score_adj;

        if (tsk->flags & PF_KTHREAD)
            continue;

        /* if task no longer has any memory ignore it */
        if (test_task_flag(tsk, TIF_MM_RELEASED))
            continue;

        if (ktime_us_delta(ktime_get(), lowmem_deathpending_timeout) < 0
                && (test_task_flag(tsk, TIF_MEMDIE))) {
            same_count++;
            if (tsk->pid != oldpid || same_count > 1000) {
                lowmem_print(1, "terminate loop for %d (%s)" \
                             "old:%d last:%d %ld %d\n",
                             tsk->pid,
                             tsk->comm,
                             oldpid,
                             lastpid,
                             (long)ktime_us_delta(
                                 ktime_get(),
                                 lowmem_deathpending_timeout),
                             same_count);
                lowmem_print(2,
                             "state:%ld flag:0x%x la:%lld " \
                             "busy: %d %d\n",
                             tsk->state,
                             tsk->flags,
                             tsk->sched_info.last_arrival,
                             busy_count,
                             oom_killer_disabled);
                oldpid = tsk->pid;
                same_count = 0;
            }

            rcu_read_unlock();

            spin_unlock(&lowmem_lock);
            mutex_unlock(&scan_mutex);
            /* wait one jiffie */
            schedule_timeout(1);

            return LMK_BUSY;
        }

        p = find_lock_task_mm(tsk);
        if (!p)
            continue;

        oom_score_adj = p->signal->oom_score_adj;
        if (oom_score_adj < min_score_adj) {
            task_unlock(p);
            continue;
        }
        tasksize = get_mm_rss(p->mm);
        task_unlock(p);
        if (tasksize <= 0)
            continue;
        if (selected) {
            if (oom_score_adj < selected_oom_score_adj)
                continue;
            if (oom_score_adj == selected_oom_score_adj &&
                    tasksize <= selected_tasksize)
                continue;
        }
        selected = p;
        selected_tasksize = tasksize;
        selected_oom_score_adj = oom_score_adj;
        lowmem_print(4, "select %d (%s), adj %d, size %d, to kill\n",
                     p->pid, p->comm, oom_score_adj, tasksize);
    }
    if (selected) {
        lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
                     selected->pid, selected->comm,
                     selected_oom_score_adj, selected_tasksize);
        send_sig(SIGKILL, selected, 0);

        lowmem_deathpending_timeout = ktime_add_ns(ktime_get(),
                                      NSEC_PER_SEC/2);
        lowmem_print(2, "state:%ld flag:0x%x la:%lld busy:%d %d\n",
                     selected->state, selected->flags,
                     selected->sched_info.last_arrival,
                     busy_count, oom_killer_disabled);
        lastpid = selected->pid;
        set_tsk_thread_flag(selected, TIF_MEMDIE);
        rem -= selected_tasksize;
    }

    rcu_read_unlock();

    lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
                 nr_to_scan, sc->gfp_mask, rem);

    spin_unlock(&lowmem_lock);
    mutex_unlock(&scan_mutex);
    return rem;
}
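The shrinker above arms a half-second deadline when it sends SIGKILL and treats any TIF_MEMDIE task as "kill still in flight" until that deadline has passed. A minimal sketch of that bookkeeping, with hypothetical helper names:

static ktime_t arm_kill_deadline(void)
{
	/* Give the killed task half a second to release its memory. */
	return ktime_add_ns(ktime_get(), NSEC_PER_SEC / 2);
}

static bool kill_still_pending(ktime_t deadline)
{
	return ktime_us_delta(ktime_get(), deadline) < 0;
}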
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * Call to tick_nohz_start_idle stops the last_update_time from being
	 * updated. Thus, it must not be called in the event we are called from
	 * irq_exit() with the prior state different than idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;

		/*
		 * On SMP we really should only care for the CPU which
		 * has the do_timer duty assigned. All other CPUs can
		 * sleep as long as they want.
		 */
		if (cpu == tick_do_timer_cpu ||
		    tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			time_delta = timekeeping_max_deferment();
		else
			time_delta = KTIME_MAX;
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
			expires = ktime_add_ns(last_update, time_delta);
		} else {
			expires.tv64 = KTIME_MAX;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogram of event if its not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpumask_clear_cpu(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		 if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}
Example #16
0
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int err, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(atomic_read(&transaction->t_updates) > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);

	if (handle->h_sync ||
	    (atomic_read(&transaction->t_outstanding_credits) >
	     journal->j_max_transaction_buffers) ||
	    time_after_eq(jiffies, transaction->t_expires)) {

		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		
		jbd2_log_start_commit(journal, transaction->t_tid);

		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	tid = transaction->t_tid;
	if (atomic_dec_and_test(&transaction->t_updates)) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

	lock_map_release(&handle->h_lockdep_map);

	jbd2_free_handle(handle);
	return err;
}
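Compared with the jbd version earlier, jbd2 bounds the measured average commit time by the journal's configured batch limits, which are stored in microseconds. A minimal sketch of that clamp, with a hypothetical helper name:

static u64 clamp_commit_time(journal_t *journal, u64 avg_ns)
{
	/* Batch times are in microseconds; convert to nanoseconds. */
	avg_ns = max_t(u64, avg_ns, 1000 * journal->j_min_batch_time);

	return min_t(u64, avg_ns, 1000 * journal->j_max_batch_time);
}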
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	expires_next.tv64 = KTIME_MAX;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < 0)
					expires.tv64 = KTIME_MAX;
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	static DEFINE_SPINLOCK(lowmem_lock);
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	static int same_count;
	static int busy_count;
	static int busy_count_dropped;
	static int oldpid;
	static int lastpid;
	static ktime_t next_busy_print;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_FILE_MAPPED);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		/*
		 * Convert lowmem_minfree[i] to signed to avoid that other_free
		 * and/or other_file are converted to unsigned.
		 *
		 */
		if (other_free < (int) lowmem_minfree[i] &&
		    other_file < (int) lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d (%lu %lu), ma %d\n",
				sc->nr_to_scan, sc->gfp_mask, other_free,
				other_file,
				global_page_state(NR_FILE_PAGES),
				global_page_state(NR_FILE_MAPPED),
				min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);

		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	if (spin_trylock(&lowmem_lock) == 0) {
		if (ktime_us_delta(ktime_get(), next_busy_print) > 0) {
			lowmem_print(2, "Lowmemkiller busy %d %d %d\n",
				busy_count, busy_count_dropped,
				oom_killer_disabled);
			next_busy_print = ktime_add(ktime_get(),
						    ktime_set(5, 0));
			busy_count_dropped = 0;
		}
		busy_count++;
		busy_count_dropped++;
		return LMK_BUSY;
	}
	/* use the RCU read lock to protect the task list */
	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
			ktime_us_delta(ktime_get(),
				lowmem_deathpending_timeout) < 0) {
			task_unlock(p);
			same_count++;
			if (p->pid != oldpid || same_count > 1000) {
				lowmem_print(1,
					"terminate %d (%s) old:%d last:%d %ld %d\n",
					p->pid, p->comm, oldpid, lastpid,
					(long)ktime_us_delta(ktime_get(),
						lowmem_deathpending_timeout),
					same_count);
				lowmem_print(2,
					"state:%ld flag:0x%x la:%lld busy:%d %d\n",
					p->state, p->flags,
					p->sched_info.last_arrival,
					busy_count, oom_killer_disabled);
				oldpid = p->pid;
				same_count = 0;
			}
			rcu_read_unlock();
			spin_unlock(&lowmem_lock);
			/* wait one jiffie */
			schedule_timeout(1);
			return LMK_BUSY;
		}
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(4, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d,\n"
				"   to free %ldkB on behalf of '%s' (%d) because\n"
				"   cache %ldkB is below limit %ldkB for oom_score_adj %d\n"
				"   Free memory is %ldkB above reserved\n",
				 selected->pid, selected->comm,
				 selected_oom_score_adj,
				 selected_tasksize * (long)(PAGE_SIZE / 1024),
				 current->comm, current->pid,
				 other_file * (long)(PAGE_SIZE / 1024),
				 lowmem_minfree[i] * (long)(PAGE_SIZE / 1024),
				 min_score_adj,
				 other_free * (long)(PAGE_SIZE / 1024));
		send_sig(SIGKILL, selected, 0);

		lowmem_deathpending_timeout = ktime_add_ns(ktime_get(),
							   NSEC_PER_SEC/2);
		lowmem_print(2, "state:%ld flag:0x%x la:%lld busy:%d %d\n",
			     selected->state, selected->flags,
			     selected->sched_info.last_arrival,
			     busy_count, oom_killer_disabled);
		lastpid = selected->pid;
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	rcu_read_unlock();
	spin_unlock(&lowmem_lock);
	return rem;
}