/* Worker-thread main loop (POSIX/pthread implementation).
 *
 * ptr is the SchroThread descriptor for this worker.  The thread is
 * created with async->mutex already held; the loop keeps the mutex
 * locked at all times except while sleeping in pthread_cond_wait()
 * (which releases it atomically) and while actually executing a task.
 *
 * Returns NULL when the async context is torn down (async->stop == DIE).
 */
static void *
schro_thread_main (void *ptr)
{
  void (*func) (void *);
  void *priv;
  SchroThread *thread = ptr;
  SchroAsync *async = thread->async;
  int ret;

  /* thread starts with async->mutex locked */

  /* Record this thread's exec domain in thread-local storage so code
   * running on this thread can discover which domain it belongs to. */
  pthread_setspecific (domain_key, (void *) (unsigned long) thread->exec_domain);

  async->n_threads_running++;
  thread->busy = FALSE;
  while (1) {
    /* check for deaths each time */
    if (async->stop != RUNNING) {
      /* Account as idle and wake the application thread, which may be
       * waiting (in app_cond) for all workers to quiesce or exit. */
      async->n_idle++;
      thread->busy = FALSE;
      pthread_cond_signal (&async->app_cond);
      if (async->stop == DIE) {
        async->n_threads_running--;
        pthread_mutex_unlock (&async->mutex);
        SCHRO_DEBUG ("thread %d: dying", thread->index);
        return NULL;
      }
      SCHRO_DEBUG ("thread %d: stopping (until restarted)", thread->index);
      /* Paused (not dying): sleep until the application restarts us.
       * pthread_cond_wait releases async->mutex while sleeping and
       * re-acquires it before returning. */
      pthread_cond_wait (&async->thread_cond, &async->mutex);
      SCHRO_DEBUG ("thread %d: resuming", thread->index);
      async->n_idle--;
      continue;
    }

    if (thread->busy == 0) {
      /* Nothing to do: mark idle and wait to be signalled that new
       * work may be available. */
      async->n_idle++;
      SCHRO_DEBUG ("thread %d: idle", thread->index);
      pthread_cond_wait (&async->thread_cond, &async->mutex);
      SCHRO_DEBUG ("thread %d: got signal", thread->index);
      async->n_idle--;
      thread->busy = TRUE;
      /* check for stop requests before doing work */
      continue;
    }

    if (1) { /* avoiding indent change */
      /* Ask the scheduler (still under the mutex) to pick a task for
       * this thread's exec domain; it publishes the choice through
       * async->task. */
      ret = async->schedule (async->schedule_closure, thread->exec_domain);
      /* FIXME ignoring ret */
      if (!async->task.task_func) {
        /* Scheduler had nothing for us; go back to idle. */
        thread->busy = FALSE;
        continue;
      }
      thread->busy = TRUE;
      /* Take ownership of the task and clear the slot so another
       * worker can be scheduled while we run. */
      func = async->task.task_func;
      priv = async->task.priv;
      async->task.task_func = NULL;
      if (async->n_idle > 0) {
        /* Another worker is idle; wake it so it can be scheduled too. */
        pthread_cond_signal (&async->thread_cond);
      }
      /* Drop the lock only for the duration of the task itself. */
      pthread_mutex_unlock (&async->mutex);
      SCHRO_DEBUG ("thread %d: running", thread->index);
      func (priv);
      SCHRO_DEBUG ("thread %d: done", thread->index);
      pthread_mutex_lock (&async->mutex);
      /* Completion callback runs under the mutex; then notify the
       * application thread that a task finished. */
      async->complete (priv);
      pthread_cond_signal (&async->app_cond);
#if defined HAVE_CUDA || defined HAVE_OPENGL
      /* FIXME */
      /* This is required because we don't have a better mechanism
       * for indicating to threads in other exec domains that it is
       * their turn to run.  It's mostly harmless, although causes
       * a lot of unnecessary wakeups in some cases. */
      pthread_cond_broadcast (&async->thread_cond);
#endif
    }
  }
}
/* Worker-thread main loop (Win32 implementation).
 *
 * ptr is the SchroThread descriptor for this worker.  The thread is
 * created with async->mutex (a CRITICAL_SECTION) already entered; the
 * loop holds it except while waiting on the per-thread event and while
 * executing a task.
 *
 * Returns 0 when the async context is stopped (async->stop set).
 */
static unsigned int __stdcall
schro_thread_main (void *ptr)
{
  void (*func)(void *);
  void *priv;
  SchroThread *thread = ptr;
  SchroAsync *async = thread->async;
  int ret;

  /* thread starts with async->mutex locked */

  /* Record this thread's exec domain in thread-local storage. */
  TlsSetValue (domain_key, (void *)(unsigned long)thread->exec_domain);

  async->n_threads_running++;
  while (1) {
    /* Mark idle, drop the lock, and sleep until this thread's event
     * is signalled (unlike a condition variable, the event must be
     * waited on outside the critical section). */
    async->n_idle++;
    thread->busy = FALSE;
    LeaveCriticalSection (&async->mutex);
    SCHRO_DEBUG("thread %d: idle, waiting for event", thread->index);
    WaitForSingleObject (thread->event, INFINITE);
    SCHRO_DEBUG("thread %d: got event", thread->index);
    EnterCriticalSection (&async->mutex);
    async->n_idle--;
    thread->busy = TRUE;

    if (async->stop) {
      /* Tell the application thread we saw the stop request, then exit. */
      SetEvent (async->app_event);
      async->n_threads_running--;
      LeaveCriticalSection (&async->mutex);
      SCHRO_DEBUG("thread %d: stopping", thread->index);
      return 0;
    }

    /* Ask the scheduler (under the lock) to pick a task for this
     * thread's exec domain; it publishes via async->task_func/task_priv. */
    ret = async->schedule (async->schedule_closure, thread->exec_domain);
    /* FIXME ignoring ret */
    if (!async->task_func) {
      /* Scheduler had nothing for us; go back to waiting. */
      continue;
    }

    /* Take ownership of the task and clear the slot. */
    func = async->task_func;
    priv = async->task_priv;
    async->task_func = NULL;

    /* Drop the lock only while running the task itself. */
    LeaveCriticalSection (&async->mutex);
    SCHRO_DEBUG("thread %d: running", thread->index);
    func (priv);
    SCHRO_DEBUG("thread %d: done", thread->index);
    EnterCriticalSection (&async->mutex);

    /* Completion callback runs under the lock; then notify the
     * application thread that a task finished. */
    async->complete (priv);
    SetEvent (async->app_event);
#ifdef HAVE_CUDA
    /* FIXME */
    /* This is required because we don't have a better mechanism
     * for indicating to threads in other exec domains that it is
     * their turn to run.  It's mostly harmless, although causes
     * a lot of unnecessary wakeups in some cases. */
    {
      int i;
      /* BUGFIX: the loop was missing its increment clause
       * ("for(i=0;i<async->n_threads)"), which does not compile when
       * HAVE_CUDA is defined. */
      for (i = 0; i < async->n_threads; i++) {
        /* NOTE(review): this signals the same event n_threads times;
         * the pthread variant broadcasts to all workers, and each
         * worker here waits on its own thread->event — presumably this
         * should set each thread's per-thread event instead.  Verify
         * against the SchroAsync/SchroThread definitions. */
        SetEvent (async->thread_event);
      }
    }
#endif
  }
}