Example #1
void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
			struct clocksource *c, u32 mult)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}
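The clk_mask, clk_mult, clk_shift and clk_cycle_last values copied inside the write-side section above are what the fsyscall/vDSO reader later uses to turn raw clocksource cycles into nanoseconds. A minimal sketch of that conversion, with a hypothetical helper name and parameters (an illustration, not code from the example):

/* Hypothetical helper: the standard mult/shift cyc2ns arithmetic. */
static inline u64 cycles_to_ns(u64 cycles, u64 cycle_last, u64 mask,
			       u32 mult, u32 shift)
{
	u64 delta = (cycles - cycle_last) & mask;	/* elapsed cycles */

	return (delta * mult) >> shift;			/* fixed-point -> nanoseconds */
}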
Example #2
void update_vsyscall(struct timekeeper *tk)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy vsyscall data */
	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;

	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;

	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
					      + tk->wall_to_monotonic.tv_sec;
	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
						+ ((u64)tk->wall_to_monotonic.tv_nsec
							<< tk->tkr_mono.shift);

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.snsec >=
					(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		fsyscall_gtod_data.monotonic_time.snsec -=
					((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
		fsyscall_gtod_data.monotonic_time.sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}
Example #3
static void est_timer(unsigned long arg)
{
	struct net_rate_estimator *est = (struct net_rate_estimator *)arg;
	struct gnet_stats_basic_packed b;
	u64 rate, brate;

	est_fetch_counters(est, &b);
	brate = (b.bytes - est->last_bytes) << (8 - est->ewma_log);
	brate -= (est->avbps >> est->ewma_log);

	rate = (u64)(b.packets - est->last_packets) << (8 - est->ewma_log);
	rate -= (est->avpps >> est->ewma_log);

	write_seqcount_begin(&est->seq);
	est->avbps += brate;
	est->avpps += rate;
	write_seqcount_end(&est->seq);

	est->last_bytes = b.bytes;
	est->last_packets = b.packets;

	est->next_jiffies += ((HZ/4) << est->intvl_log);

	if (unlikely(time_after_eq(jiffies, est->next_jiffies))) {
		/* Ouch... timer was delayed. */
		est->next_jiffies = jiffies + 1;
	}
	mod_timer(&est->timer, est->next_jiffies);
}
Example #4
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get_longterm(new_root);
				fs->root = *new_root;
				count++;
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get_longterm(new_root);
				fs->pwd = *new_root;
				count++;
			}
			write_seqcount_end(&fs->seq);
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put_longterm(old_root);
}
Example #5
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
Example #6
void enabled_wait(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask;

	trace_hardirqs_on();

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);

	trace_hardirqs_off();

	/* Account time spent with enabled wait psw loaded as idle time. */
	write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(idle_time);
	write_seqcount_end(&idle->seqcount);
}
Example #7
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	struct timespec monotonic;

	write_seqcount_begin(&vsyscall_gtod_data.seq);

	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
	vsyscall_gtod_data.clock.cycle_last	= clock->cycle_last;
	vsyscall_gtod_data.clock.mask		= clock->mask;
	vsyscall_gtod_data.clock.mult		= mult;
	vsyscall_gtod_data.clock.shift		= clock->shift;

	vsyscall_gtod_data.wall_time_sec	= wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec	= wall_time->tv_nsec;

	monotonic = timespec_add(*wall_time, *wtm);
	vsyscall_gtod_data.monotonic_time_sec	= monotonic.tv_sec;
	vsyscall_gtod_data.monotonic_time_nsec	= monotonic.tv_nsec;

	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();
	vsyscall_gtod_data.monotonic_time_coarse =
		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);

	write_seqcount_end(&vsyscall_gtod_data.seq);
}
Example #8
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}
Example #9
// ARM10C 20160521
// current->fs: (&init_task)->fs, &root
void set_fs_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	// path: &root
	path_get(path);

	// What path_get did:
	// [pcp0] incremented (kmem_cache#2-oX (struct mount))->mnt_pcp->mnt_count by 1
	// incremented (&(kmem_cache#5-oX (struct dentry))->d_lockref)->count by 1

	// &fs->lock: &((&init_task)->fs)->lock
	spin_lock(&fs->lock);

	// What spin_lock did:
	// took the spin lock &((&init_task)->fs)->lock

	// &fs->seq: &((&init_task)->fs)->seq
	write_seqcount_begin(&fs->seq);

	// What write_seqcount_begin did:
	// (&((&init_task)->fs)->seq)->sequence: 1
	// memory barrier so other CPU cores see the shared data update

	// fs->pwd: ((&init_task)->fs)->pwd: (&init_fs)->pwd: value whose members are initialized to 0
	old_pwd = fs->pwd;
	// old_pwd: value whose members are initialized to 0

	// root.mnt: &(kmem_cache#2-oX (struct mount))->mnt
	// root.dentry: kmem_cache#5-oX (struct dentry)

	// fs->pwd: ((&init_task)->fs)->pwd: (&init_fs)->pwd: value whose members are initialized to 0, *path: root
	fs->pwd = *path;
	// fs->pwd: ((&init_task)->fs)->pwd.mnt: &(kmem_cache#2-oX (struct mount))->mnt
	// fs->pwd: ((&init_task)->fs)->pwd.dentry: kmem_cache#5-oX (struct dentry)

	// &fs->seq: &((&init_task)->fs)->seq
	write_seqcount_end(&fs->seq);

	// What write_seqcount_end did:
	// memory barrier so other CPU cores see the shared data update
	// (&((&init_task)->fs)->seq)->sequence: 2

	// &fs->lock: &((&init_task)->fs)->lock
	spin_unlock(&fs->lock);

	// What spin_unlock did:
	// released the spin lock &((&init_task)->fs)->lock

	// old_pwd.dentry: NULL
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
Example #10
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}
Example #11
/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&xtime_lock);
	write_seqcount_begin(&xtime_seq);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_seqcount_end(&xtime_seq);
	raw_spin_unlock(&xtime_lock);
	return period;
}
Example #12
void psi_memstall_tick(struct task_struct *task, int cpu)
{
	struct psi_group *group;
	void *iter = NULL;

	while ((group = iterate_groups(task, &iter))) {
		struct psi_group_cpu *groupc;

		groupc = per_cpu_ptr(group->pcpu, cpu);
		write_seqcount_begin(&groupc->seq);
		record_times(groupc, cpu, true);
		write_seqcount_end(&groupc->seq);
	}
}
Example #13
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	path_get_longterm(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put_longterm(&old_root);
}
Example #14
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get_longterm(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put_longterm(&old_pwd);
}
Example #15
/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		raw_spin_lock(&xtime_lock);
		write_seqcount_begin(&xtime_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_seqcount_end(&xtime_seq);
		raw_spin_unlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
Example #16
static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_counter *this_cpu;
	seqcount_t *myseq;

	local_bh_disable();
	this_cpu = this_cpu_ptr(priv->counter);
	myseq = this_cpu_ptr(&nft_counter_seq);

	write_seqcount_begin(myseq);

	this_cpu->bytes += pkt->skb->len;
	this_cpu->packets++;

	write_seqcount_end(myseq);
	local_bh_enable();
}
Example #17
/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	raw_spin_lock(&xtime_lock);
	write_seqcount_begin(&xtime_seq);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_seqcount_end(&xtime_seq);
	raw_spin_unlock(&xtime_lock);
}
Example #18
static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, cpu, false);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t] == 0 && !psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					clear, set);
			psi_bug = 1;
		}
		groupc->tasks[t]--;
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	write_seqcount_end(&groupc->seq);
}
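Every example above is the write side of a seqcount. For completeness, a minimal reader-side sketch of the same pattern, assuming a hypothetical seqcount_t named seq guarding two fields; the pairing helpers read_seqcount_begin() and read_seqcount_retry() come from <linux/seqlock.h>:

#include <linux/seqlock.h>

/* Hypothetical shared state; seq must be set up with seqcount_init(). */
static seqcount_t seq;
static u64 shared_a, shared_b;

static void read_pair(u64 *a, u64 *b)
{
	unsigned int start;

	do {
		/* snapshot the sequence counter before reading */
		start = read_seqcount_begin(&seq);
		*a = shared_a;
		*b = shared_b;
		/* retry if a writer ran while we were reading */
	} while (read_seqcount_retry(&seq, start));
}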