Example #1
void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
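(Illustrative read side, not part of the example: a minimal sketch of how such a timer hook is typically invoked from the timer interrupt with interrupts disabled, which is exactly the RCU-sched read-side critical section the synchronize_sched() above waits for. The wrapper name is an assumption.)

static void call_timer_hook(struct pt_regs *regs)
{
	/* Hypothetical caller: runs in hard-interrupt context (IRQs off),
	 * so it is an implicit RCU-sched reader and cannot still be
	 * executing once unregister_timer_hook() has returned.
	 */
	int (*hook)(struct pt_regs *) = ACCESS_ONCE(timer_hook);

	if (hook)
		hook(regs);
}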
Example #2
/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle all events generated by
 * the device are delivered only to this handle. Also events injected
 * by other input handles are ignored while device is grabbed.
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);
	/*
	 * Not using synchronize_rcu() because read-side is protected
	 * by a spinlock with interrupts off instead of rcu_read_lock().
	 */
	synchronize_sched();

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
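(Illustrative read side, not from the example: the comment above notes that readers hold a spinlock with interrupts off instead of rcu_read_lock(). A hedged sketch of such an event-delivery path follows; the function name and the event_lock field are assumptions.)

static void deliver_event(struct input_dev *dev,
			  unsigned int type, unsigned int code, int value)
{
	/* Assumed to run with dev->event_lock held and IRQs disabled,
	 * i.e. inside an implicit RCU-sched read-side critical section,
	 * which is what the synchronize_sched() in input_grab_device()
	 * waits for.
	 */
	struct input_handle *grab = rcu_dereference(dev->grab);

	if (grab)
		grab->handler->event(grab, type, code, value);
}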
Example #3
static void vmmr0_synchronize_sched_expedited(void)
{
	int cpu;
	bool need_full_sync = false;
	struct sync_req *req;
	long snap;
	int trycount = 0;

	smp_mb();  /* ensure prior mod happens before capturing snap. */
	snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
	get_online_cpus();
	while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
		put_online_cpus();
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}
		if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}
		get_online_cpus();
	}
	for_each_online_cpu(cpu) {
		req = &per_cpu(sync_req, cpu);
		init_completion(&req->done);
		smp_wmb();  /* Order init_completion() before setting ->pending. */
		req->pending = true;
		wake_up_process(per_cpu(sync_thread, cpu));
	}
	for_each_online_cpu(cpu) {
		req = &per_cpu(sync_req, cpu);
		wait_for_completion(&req->done);
		if (unlikely(!req->success))
			need_full_sync = true;
	}
	synchronize_sched_expedited_count++;
	mutex_unlock(&rcu_sched_expedited_mutex);
	put_online_cpus();
	if (need_full_sync)
		synchronize_sched();
}
Example #4
/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}
Example #5
static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	if (dev->grab == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_sched();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}
Example #6
int efab_linux_trampoline_dtor(int no_sct)
{
    int waiting = 0;

    /* Restore syscalls */
    linux_trampoline_ppc64_restore_syscalls();

    /* Give any syscalls that have just been entered a chance to
     * increment the efab_syscall_used atomic counter.
     */
    synchronize_sched();
#ifdef CONFIG_PREEMPT
    schedule_timeout(msecs_to_jiffies(50));
#endif
    while (atomic_read(&efab_syscall_used))
    {
        if (!waiting)
        {
            ci_log("Waiting for intercepted syscalls to finish .. ");
            waiting = 1;
        }
        schedule_timeout(msecs_to_jiffies(50));
    }
    if (waiting)
    {
        ci_log("\t .. OK");
        synchronize_sched();
#ifdef CONFIG_PREEMPT
        /* Try to wait .. */
        schedule_timeout(msecs_to_jiffies(50));
        ci_log("Unload is dangerous on RT kernels: prepare to crash.");
#endif
    }
 
    linux_trampoline_ppc64_dispose();
   
    return 0;
}
Example #7
static void timer_stop(void)
{
	enable_timer_nmi_watchdog();
	unset_nmi_callback();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	synchronize_sched();  /* Allow already-started NMIs to complete. */
#else
	synchronize_kernel();
#endif

#ifdef RRPROFILE
	disable_poll_idle();
#endif // RRPROFILE
}
Example #8
static
void tracker_call_rcu_workqueue(struct work_struct *work)
{
       struct latency_tracker *tracker;
       struct llist_node *list;
       struct latency_tracker_event *e, *n;

       tracker = container_of(work, struct latency_tracker,
		       tracker_call_rcu_w.work);
       list = llist_del_all(&tracker->to_release);
       synchronize_sched();
       llist_for_each_entry_safe(e, n, list, llist)
	       wrapper_freelist_put_event(tracker, e);
}
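(Illustrative producer side, not from the example: a sketch of how events might be queued for deferred release. The helper's name, the workqueue, and the queueing delay are assumptions; only to_release, llist, and tracker_call_rcu_w come from the code above.)

static void tracker_defer_release(struct latency_tracker *tracker,
		struct latency_tracker_event *e)
{
	/* Push onto the lockless list consumed by the work item above;
	 * the worker's synchronize_sched() then serves as the grace
	 * period before the event is handed back to the freelist.
	 */
	llist_add(&e->llist, &tracker->to_release);
	queue_delayed_work(system_wq, &tracker->tracker_call_rcu_w,
			   msecs_to_jiffies(100));
}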
Example #9
/**
 * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
 * @ind: indicator for which the handler is to be unregistered
 * @isc: interruption subclass
 */
void s390_unregister_adapter_interrupt(void *ind, u8 isc)
{
	struct airq_t *airq;
	char dbf_txt[16];
	int i;

	i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
	CIO_TRACE_EVENT(4, dbf_txt);
	indicators[isc].byte[i] = 0;
	airq = xchg(&airqs[isc][i], NULL);
	/*
	 * Allow interrupts to complete. This will ensure that the airq handle
	 * is no longer referenced by any interrupt handler.
	 */
	synchronize_sched();
	kfree(airq);
}
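(Illustrative interrupt-side consumer, not from the driver: a heavily hedged sketch of the dispatch loop the grace period above protects against. The bound NR_AIRQS, the handler field, and its calling convention are assumptions; only airqs and indicators come from the code above.)

static void do_airq_dispatch(u8 isc)
{
	int i;

	for (i = 0; i < NR_AIRQS; i++) {
		/* Runs in interrupt context with IRQs disabled, so once
		 * synchronize_sched() returns no dispatch can still hold
		 * the airq pointer that is about to be kfree()d.
		 */
		struct airq_t *airq = airqs[isc][i];

		if (airq && indicators[isc].byte[i])
			airq->handler(&indicators[isc].byte[i]);
	}
}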
Example #10
int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
	might_sleep();

	if (eventType < HvLpEvent_Type_NumTypes) {
		if (!lpEventHandlerPaths[eventType]) {
			lpEventHandler[eventType] = NULL;
			/*
			 * We now sleep until all other CPUs have scheduled.
			 * This ensures that the deletion is seen by all
			 * other CPUs, and that the deleted handler isn't
			 * still running on another CPU when we return.
			 */
			synchronize_sched();
			return 0;
		}
	}
	return 1;
}
Example #11
int
s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
{
	int ret;
	char dbf_txt[15];

	CIO_TRACE_EVENT (4, "urgaint");

	if (handler == NULL)
		ret = -EINVAL;
	else {
		adapter_handler = NULL;
		synchronize_sched();  /* Allow interrupts to complete. */
		ret = 0;
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (4, dbf_txt);

	return ret;
}
Example #12
/*
 * register for adapter interrupts
 *
 * With HiperSockets the zSeries architecture provides for
 *  means of adapter interrupts, pseudo I/O interrupts that are
 *  not tied to an I/O subchannel, but to an adapter. However,
 *  it doesn't disclose how to enable/disable them, only how to
 *  recognize them. Perhaps we should consider them
 *  being shared interrupts, and thus build a linked list
 *  of adapter handlers ... to be evaluated ...
 */
int
s390_register_adapter_interrupt (adapter_int_handler_t handler)
{
	int ret;
	char dbf_txt[15];

	CIO_TRACE_EVENT (4, "rgaint");

	if (handler == NULL)
		ret = -EINVAL;
	else
		ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
	if (!ret)
		synchronize_sched();  /* Allow interrupts to complete. */

	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (4, dbf_txt);

	return ret;
}
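(Illustrative interrupt-side caller, not from the driver: a minimal sketch of the read side that this register/unregister pair relies on. It assumes the handler takes no arguments, purely for illustration.)

static void call_adapter_handler(void)
{
	/* Interrupt context, IRQs off: an implicit RCU-sched reader, so
	 * the synchronize_sched() in the unregister path guarantees the
	 * old handler has finished before that path returns.
	 */
	adapter_int_handler_t handler = adapter_handler;

	if (handler)
		handler();
}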
Example #13
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example #14
static void timer_stop(void)
{
	nmi_adjust_hz(1);
	unregister_die_notifier(&profile_timer_exceptions_nb);
	synchronize_sched();  /* Allow already-started NMIs to complete. */
}
Example #15
void rcu_barrier(void)
{
       synchronize_sched();
       synchronize_sched();
       atomic_inc(&rcu_stats.nbarriers);
}
Example #16
static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
				size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = mce_log_get_idx_check(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}
Example #17
static void timer_stop(void)
{
	unregister_die_notifier(&profile_timer_exceptions_nb);
	synchronize_sched();  /* Allow already-started NMIs to complete. */
}