Example #1
/* This function is used to run a function from the main thread.
 * sync_func is the function pointer that will be run from the main thread.
 * The function must take two long parameters and return a long. */
long g_xrdp_sync(long(*sync_func)(long param1, long param2), long sync_param1, long sync_param2)
{
	long sync_result;
	int sync_command;

	/* If the function is called from the main thread, it can
	 * be called directly. g_threadid = main thread ID */
	if (tc_threadid_equal(tc_get_threadid(), g_threadid))
	{
		/* this is the main thread, call the function directly */
		/* in fork mode, this always happens too */
		sync_result = sync_func(sync_param1, sync_param2);
		/*g_writeln("g_xrdp_sync processed IN main thread -> continue");*/
	}
	else
	{
		/* All threads have to wait here until the main thread
		 * has processed the function. g_process_waiting_function() is called
		 * from the listening thread and processes the request. */
		tc_mutex_lock(g_sync1_mutex);
		tc_mutex_lock(g_sync_mutex);
		g_sync_param1 = sync_param1;
		g_sync_param2 = sync_param2;
		g_sync_func = sync_func;
		/* set g_sync_command to THREAD_WAITING so that g_process_waiting_function()
		 * knows a function is waiting to be processed */
		g_sync_command = THREAD_WAITING;
		tc_mutex_unlock(g_sync_mutex);
		/* set this event so that the main thread knows
		 * g_process_waiting_function() must be called */
		SetEvent(g_SyncEvent);

		do
		{
			g_sleep(100);
			tc_mutex_lock(g_sync_mutex);
			/* reload the value from the global to see whether
			 * g_process_waiting_function() has processed the request */
			sync_command = g_sync_command;
			sync_result = g_sync_result;
			tc_mutex_unlock(g_sync_mutex);
		} while (sync_command != 0); /* loop until g_process_waiting_function()
		                              * has processed the request */
		tc_mutex_unlock(g_sync1_mutex);
		/*g_writeln("g_xrdp_sync processed BY main thread -> continue");*/
	}

	return sync_result;
}
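The main-thread side referenced by the comments is not shown in this example. Below is a minimal sketch of what g_process_waiting_function() might look like, assuming the globals used above (g_sync_mutex, g_sync_command, g_sync_func, g_sync_param1/2, g_sync_result) and that a command value of 0 means "no work pending"; it is an illustration of the hand-off, not the actual xrdp implementation.

/* Hypothetical sketch, called by the main thread after the sync event
 * is signalled. Runs the queued function and publishes the result. */
static void
g_process_waiting_function(void)
{
	tc_mutex_lock(g_sync_mutex);

	if (g_sync_command != 0) /* a worker thread queued a request */
	{
		if (g_sync_func != 0)
		{
			/* run the queued function on the main thread and store
			 * the result for the waiting worker thread */
			g_sync_result = g_sync_func(g_sync_param1, g_sync_param2);
		}

		/* tell the polling loop in g_xrdp_sync() that the
		 * request has been processed */
		g_sync_command = 0;
	}

	tc_mutex_unlock(g_sync_mutex);
}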
Example #2
long APP_CC
g_xrdp_sync(long (*sync_func)(long param1, long param2), long sync_param1,
            long sync_param2)
{
    long sync_result;
    int sync_command;

    if (tc_threadid_equal(tc_get_threadid(), g_threadid))
    {
        /* this is the main thread, call the function directly */
        sync_result = sync_func(sync_param1, sync_param2);
    }
    else
    {
        tc_mutex_lock(g_sync1_mutex);
        tc_mutex_lock(g_sync_mutex);
        g_sync_param1 = sync_param1;
        g_sync_param2 = sync_param2;
        g_sync_func = sync_func;
        g_sync_command = 100;
        tc_mutex_unlock(g_sync_mutex);
        g_set_wait_obj(g_sync_event);

        do
        {
            g_sleep(100);
            tc_mutex_lock(g_sync_mutex);
            sync_command = g_sync_command;
            sync_result = g_sync_result;
            tc_mutex_unlock(g_sync_mutex);
        }
        while (sync_command != 0);

        tc_mutex_unlock(g_sync1_mutex);
    }

    return sync_result;
}
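A usage sketch: a worker thread that needs main-thread-only work done wraps it in a callback with the required signature and passes it to g_xrdp_sync(), which blocks until the main thread has run it. The callback name, parameters, and surrounding function here are hypothetical, chosen only to show the calling convention.

/* Hypothetical callback: runs on the main thread, takes two longs,
 * returns a long, exactly as g_xrdp_sync() requires. */
static long
close_session_callback(long session_id, long unused)
{
    /* main-thread-only work would go here */
    return 0;
}

/* Hypothetical worker-thread caller. */
static void
worker_thread_example(long session_id)
{
    /* blocks until the main thread has executed the callback */
    long error = g_xrdp_sync(close_session_callback, session_id, 0);
    (void)error;
}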
Example #3
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
{
	int idx;

	idx = sp->completed;
	mutex_lock(&sp->mutex);

	/*
	 * Check to see if someone else did the work for us while we were
	 * waiting to acquire the lock.  We need -two- advances of
	 * the counter, not just one.  If there was but one, we might have
	 * shown up -after- our helper's first synchronize_sched(), thus
	 * having failed to prevent CPU-reordering races with concurrent
	 * srcu_read_unlock()s on other CPUs (see comment below).  So we
	 * either (1) wait for two or (2) supply the second ourselves.
	 */

	if ((sp->completed - idx) >= 2) {
		mutex_unlock(&sp->mutex);
		return;
	}

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() ensures that any CPU that
	 * sees the new value of sp->completed will also see any preceding
	 * changes to data structures made by this CPU.  This prevents
	 * some other CPU from reordering the accesses in its SRCU
	 * read-side critical section to precede the corresponding
	 * srcu_read_lock() -- ensuring that such references will in
	 * fact be protected.
	 *
	 * So it is now safe to do the flip.
	 */

	idx = sp->completed & 0x1;
	sp->completed++;

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * At this point, because of the preceding synchronize_sched(),
	 * all srcu_read_lock() calls using the old counters have completed.
	 * Their corresponding critical sections might well be still
	 * executing, but the srcu_read_lock() primitives themselves
	 * will have finished executing.  We initially give readers
	 * an arbitrarily chosen 10 microseconds to get out of their
	 * SRCU read-side critical sections, then loop waiting 1/HZ
	 * seconds per iteration.  The 10-microsecond value has done
	 * very well in testing.
	 */

	if (srcu_readers_active_idx(sp, idx))
		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
	while (srcu_readers_active_idx(sp, idx))
		schedule_timeout_interruptible(1);

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() forces all srcu_read_unlock()
	 * primitives that were executing concurrently with the preceding
	 * for_each_possible_cpu() loop to have completed by this point.
	 * More importantly, it also forces the corresponding SRCU read-side
	 * critical sections to have also completed, and the corresponding
	 * references to SRCU-protected data items to be dropped.
	 *
	 * Note:
	 *
	 *	Despite what you might think at first glance, the
	 *	preceding synchronize_sched() -must- be within the
	 *	critical section ended by the following mutex_unlock().
	 *	Otherwise, a task taking the early exit can race
	 *	with a srcu_read_unlock(), which might have executed
	 *	just before the preceding srcu_readers_active() check,
	 *	and whose CPU might have reordered the srcu_read_unlock()
	 *	with the preceding critical section.  In this case, there
	 *	is nothing preventing the synchronize_sched() task that is
	 *	taking the early exit from freeing a data structure that
	 *	is still being referenced (out of order) by the task
	 *	doing the srcu_read_unlock().
	 *
	 *	Alternatively, the comparison with "2" on the early exit
	 *	could be changed to "3", but this increases synchronize_srcu()
	 *	latency for bulk loads.  So the current code is preferred.
	 */

	mutex_unlock(&sp->mutex);
}
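For orientation, the two callers named in the leading comment differ only in the sync_func they pass in. A sketch of how those wrappers might look, inferred from that comment rather than reproduced verbatim from the kernel tree (export annotations and kernel-doc omitted):

/* Sketch: synchronize_srcu() uses the ordinary scheduler barrier,
 * while the expedited variant uses the faster, more expensive one. */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, synchronize_sched);
}

void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, synchronize_sched_expedited);
}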