/*
 *  Wait for an event to happen.
 *
 *  If holds != NULL, the caller holds that mutex, which is released
 *  before going to sleep.  The thread calling thread_signal_cond *must*
 *  hold the same mutex.
 *
 *  The holds mutex is reacquired after wakeup.
 */
int
thread_wait_cond (void *event, dk_mutex_t *holds, TVAL timeout)
{
  thread_t *thr = current_thread;
  dk_mutex_t *mtx;
  int ok;

  thr->thr_status = WAITEVENT;
  thr->thr_event = event ? event : &_ev_never;
  thr->thr_event_pipe = -1;

  mtx = holds ? holds : _q_lock;

  Q_LOCK ();
  do
    {
      thread_queue_to (&_waitq, thr);
      _thread_num_wait++;
      if (holds)
	Q_UNLOCK ();
      if (timeout == TV_INFINITE)
	ok = pthread_cond_wait ((pthread_cond_t *) thr->thr_cv, &mtx->mtx_mtx);
      else
	{
	  struct timespec to;
	  struct timeval now;

	  /* Convert the relative timeout (ms) into an absolute deadline. */
	  gettimeofday (&now, NULL);
	  to.tv_sec = now.tv_sec + timeout / 1000;
	  to.tv_nsec = now.tv_usec * 1000 + (timeout % 1000) * 1000000;
	  if (to.tv_nsec >= 1000000000)
	    {
	      to.tv_nsec -= 1000000000;
	      to.tv_sec++;
	    }
	  ok = pthread_cond_timedwait ((pthread_cond_t *) thr->thr_cv, &mtx->mtx_mtx, &to);
	}
      if (holds)
	Q_LOCK ();
      thread_queue_remove (&_waitq, thr);
      _thread_num_wait--;
    } while (ok == 0 && thr->thr_event);
  Q_UNLOCK ();

  CKRET (ok);

failed:
  thr->thr_status = RUNNING;

  return thr->thr_event == NULL ? 0 : -1;
}
void qRemove(queueObj_t *qObj, void *data, size_t nSize)
{
    Q_LOCK(&qObj->qMutex);
    NODE *node = qObj->qHead;
    if (!qObj->qHead || !qObj->qTail) {
        /* Empty queue: nothing to remove. */
        goto end;
    }
    if (qObj->qHead == qObj->qTail) {
        /* Single element: the queue becomes empty. */
        qObj->qHead = qObj->qTail = NULL;
        --qObj->qCount;
    } else {
        /* Unlink the head node. */
        qObj->qHead = qObj->qHead->next;
        node->next = NULL;
        qObj->qHead->prev = NULL;
        --qObj->qCount;
    }
    if (data) {
        /* Copy the payload out before the node is freed. */
        memcpy(data, node->data, nSize);
    }
end:
    if (node) {
        FREE(node);
    }
    Q_UNLOCK(&qObj->qMutex);
}
/*
 *  Wake up all threads waiting for an event.
 */
int
thread_signal_cond (void *event)
{
  thread_t *thr;
  thread_t *next;
  int count;
  char dummy;

  count = 0;

  Q_LOCK ();
  for (thr = (thread_t *) _waitq.thq_head.thr_next;
       thr != (thread_t *) &_waitq.thq_head;
       thr = next)
    {
      next = (thread_t *) thr->thr_hdr.thr_next;
      if (thr->thr_event == event)
	{
	  thr->thr_event = NULL;
	  if (thr->thr_event_pipe == -1)
	    pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
	  else
	    {
	      /*
	       *  Wake up the select.
	       *  XXX Should fix this - only one thread can safely wait
	       *  for an event in thread_select at a time.
	       */
	      write (thr->thr_event_pipe, &dummy, 1);
	    }
	  count++;
	}
    }
  Q_UNLOCK ();

  return count;
}
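/*
 *  Usage sketch (illustrative only, not part of the library): a minimal
 *  handshake built on thread_wait_cond / thread_signal_cond.  The mutex
 *  job_mtx, the flag job_ready, the event key &job_ready, and the helpers
 *  mutex_allocate / mutex_enter / mutex_leave are assumed names here; the
 *  point is that the waiter re-checks its predicate and that the signaller
 *  holds the same dk_mutex_t that the waiter passes as "holds".
 */
#if 0
static dk_mutex_t *job_mtx;	/* assumed: created once with mutex_allocate () */
static int job_ready = 0;	/* protected by job_mtx */

static void
consumer_wait_for_job (void)
{
  mutex_enter (job_mtx);
  while (!job_ready)
    {
      /* job_mtx is released while asleep and reacquired on wakeup. */
      if (thread_wait_cond (&job_ready, job_mtx, 1000 /* ms */) == -1)
	break;			/* timed out */
    }
  job_ready = 0;
  mutex_leave (job_mtx);
}

static void
producer_post_job (void)
{
  mutex_enter (job_mtx);
  job_ready = 1;
  thread_signal_cond (&job_ready);
  mutex_leave (job_mtx);
}
#endif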
int
thread_release_dead_threads (int leave_count)
{
  thread_t *thr;
  int rc;
  long thread_killed = 0;
  thread_queue_t term;

  Q_LOCK ();
  if (_deadq.thq_count <= leave_count)
    {
      Q_UNLOCK ();
      return 0;
    }
  thread_queue_init (&term);
  while (_deadq.thq_count > leave_count)
    {
      thr = thread_queue_from (&_deadq);
      if (!thr)
	break;
      _thread_num_dead--;
      thread_queue_to (&term, thr);
    }
  Q_UNLOCK ();

  while (NULL != (thr = thread_queue_from (&term)))
    {
      thr->thr_status = TERMINATE;
      rc = pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
      CKRET (rc);
      thread_killed++;
    }
#if 0
  if (thread_killed)
    log_info ("%ld OS threads released", thread_killed);
#endif
  return thread_killed;

failed:
  GPF_T1 ("Thread restart failed");
  return 0;
}
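/*
 *  Illustrative call site (not from the original source): a housekeeping
 *  task might periodically trim the cache of parked threads, keeping a
 *  small reserve for reuse by thread_create.  The reserve size 10 is
 *  arbitrary.
 */
#if 0
static void
trim_thread_cache (void)
{
  /* Keep at most 10 parked threads; release the rest back to the OS. */
  thread_release_dead_threads (10);
}
#endif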
void qDestroy(queueObj_t *qObj)
{
    if (!qObj) {
        return;
    }
    Q_LOCK(&qObj->qMutex);
    NODE *node = NULL;
    while (qObj->qHead != NULL) {
        node = qObj->qHead;
        qObj->qHead = qObj->qHead->next;
        FREE(node);
    }
    qObj->qTail = qObj->qHead = NULL;
    qObj->qCount = 0;
    Q_UNLOCK(&qObj->qMutex);
    pthread_mutex_destroy(&qObj->qMutex);
}
void qInsert(queueObj_t *qObj, void *data)
{
    if (!qObj || !data) {
        return;
    }
    Q_LOCK(&qObj->qMutex);
    NODE *node = allocate_node(data);
    if (!node) {
        /* Allocation failed; leave the queue unchanged. */
        Q_UNLOCK(&qObj->qMutex);
        return;
    }
    if (!qObj->qHead) {
        /* Empty queue: the new node is both head and tail. */
        qObj->qHead = qObj->qTail = node;
        ++qObj->qCount;
    } else {
        /* Append at the tail. */
        qObj->qTail->next = node;
        node->prev = qObj->qTail;
        qObj->qTail = node;
        ++qObj->qCount;
    }
    Q_UNLOCK(&qObj->qMutex);
}
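/*
 * Usage sketch (illustrative, not part of the original source): a simple
 * FIFO of fixed-size work items.  The work_item_t type and the assumption
 * that allocate_node() keeps a payload that qRemove() can memcpy back out
 * are hypothetical; only qInsert/qRemove/qDestroy above are real.
 */
#if 0
typedef struct work_item {
    int job_id;
} work_item_t;

static void queue_example(queueObj_t *q)
{
    work_item_t in = { 42 };
    work_item_t out = { 0 };

    qInsert(q, &in);                  /* enqueue one item */
    qRemove(q, &out, sizeof(out));    /* dequeue it into "out" */
    qDestroy(q);                      /* free remaining nodes and the mutex */
}
#endif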
int
thread_select (int n, fd_set *rfds, fd_set *wfds, void *event, TVAL timeout)
{
  thread_t *thr = current_thread;
  struct timeval *ptv, tv;
  char dummy;
  int rc;

  if (timeout == TV_INFINITE)
    ptv = NULL;
  else
    {
      tv.tv_sec = timeout / 1000;
      tv.tv_usec = (timeout % 1000) * 1000;
      ptv = &tv;
    }

  if (event)
    {
      thr->thr_event = event;
      thr->thr_event_pipe = _ev_pipes[1];
      if (rfds == NULL)
	{
	  rfds = &thr->thr_rfds;
	  FD_ZERO (rfds);
	}
      FD_SET (_ev_pipes[0], rfds);
      if (_ev_pipes[0] >= n)
	n = _ev_pipes[0] + 1;
      Q_LOCK ();
      thread_queue_to (&_waitq, thr);
      Q_UNLOCK ();
    }

  _thread_num_wait++;
  thr->thr_status = WAITEVENT;

  for (;;)
    {
      if ((rc = select (n, rfds, wfds, NULL, ptv)) == -1)
	{
	  switch (errno)
	    {
	    case EINTR:
	      continue;
	    default:
	      break;
	    }
	  thr_errno = errno;
	}
      else
	thr_errno = 0;
      break;
    }

  thr->thr_status = RUNNING;
  _thread_num_wait--;

  if (event)
    {
      thr->thr_event = NULL;
      thr->thr_event_pipe = -1;
      if (rc > 0 && FD_ISSET (_ev_pipes[0], rfds))
	{
	  /* The wakeup came through the event pipe, not a real fd. */
	  read (_ev_pipes[0], &dummy, 1);
	  rc = 0;
	}
      Q_LOCK ();
      thread_queue_remove (&_waitq, thr);
      Q_UNLOCK ();
    }

  return rc;
}
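/*
 *  Usage sketch (illustrative only): wait until either a socket becomes
 *  readable or another thread posts "my_event" via thread_signal_cond,
 *  whichever comes first.  sock_fd and my_event are hypothetical names.
 */
#if 0
static int
wait_readable_or_event (int sock_fd, void *my_event)
{
  fd_set rfds;
  int rc;

  FD_ZERO (&rfds);
  FD_SET (sock_fd, &rfds);

  /* rc > 0: sock_fd is readable; rc == 0: woken by the event or timed out;
     rc == -1: error, see thr_errno. */
  rc = thread_select (sock_fd + 1, &rfds, NULL, my_event, 5000 /* ms */);
  return rc;
}
#endif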
void
thread_exit (int n)
{
  thread_t *thr = current_thread;
  volatile int is_attached = thr->thr_attached;

  if (thr == _main_thread)
    {
      call_exit (n);
    }

  thr->thr_retcode = n;
  thr->thr_status = DEAD;

  if (is_attached)
    {
      thr->thr_status = TERMINATE;
      goto terminate;
    }

  Q_LOCK ();
  thread_queue_to (&_deadq, thr);
  _thread_num_dead++;
  do
    {
      int rc = pthread_cond_wait ((pthread_cond_t *) thr->thr_cv,
	  (pthread_mutex_t *) &_q_lock->mtx_mtx);
      CKRET (rc);
    } while (thr->thr_status == DEAD);
  Q_UNLOCK ();

  if (thr->thr_status == TERMINATE)
    goto terminate;

  /* Jumps back into _thread_boot */
  longjmp (thr->thr_init_context, 1);

failed:
  thread_queue_remove (&_deadq, thr);
  _thread_num_dead--;
  Q_UNLOCK ();

terminate:
  if (thr->thr_status == TERMINATE)
    {
#ifndef OLD_PTHREADS
      pthread_detach (*(pthread_t *) thr->thr_handle);
#else
      pthread_detach ((pthread_t *) thr->thr_handle);
#endif
      _thread_free_attributes (thr);
      dk_free ((void *) thr->thr_cv, sizeof (pthread_cond_t));
      semaphore_free (thr->thr_sem);
      semaphore_free (thr->thr_schedule_sem);
      dk_free (thr->thr_handle, sizeof (pthread_t));
      thr_free_alloc_cache (thr);
      dk_free (thr, sizeof (thread_t));
    }

  if (!is_attached)
    {
      _thread_num_total--;
      pthread_exit ((void *) 1L);
    }
}
thread_t *
thread_create (
    thread_init_func initial_function,
    unsigned long stack_size,
    void *initial_argument)
{
  thread_t *thr;
  int rc;

  assert (_main_thread != NULL);

  if (stack_size == 0)
    stack_size = THREAD_STACK_SIZE;

#if (SIZEOF_VOID_P == 8)
  stack_size *= 2;
#endif
#if defined (__x86_64) && defined (SOLARIS)
  /* GK: the LDAP on that platform requires that */
  stack_size *= 2;
#endif
#ifdef HPUX_ITANIUM64
  stack_size += 8 * 8192;
#endif

  stack_size = ((stack_size / 8192) + 1) * 8192;

#if defined (PTHREAD_STACK_MIN)
  if (stack_size < PTHREAD_STACK_MIN)
    {
      stack_size = PTHREAD_STACK_MIN;
    }
#endif

  /* Any free threads with the right stack size? */
  Q_LOCK ();
  for (thr = (thread_t *) _deadq.thq_head.thr_next;
       thr != (thread_t *) &_deadq.thq_head;
       thr = (thread_t *) thr->thr_hdr.thr_next)
    {
      /* if (thr->thr_stack_size >= stack_size) */
      break;
    }
  Q_UNLOCK ();

  /* No free threads, create a new one */
  if (thr == (thread_t *) &_deadq.thq_head)
    {
#ifndef OLD_PTHREADS
      size_t os_stack_size = stack_size;
#endif
      thr = thread_alloc ();
      thr->thr_initial_function = initial_function;
      thr->thr_initial_argument = initial_argument;
      thr->thr_stack_size = stack_size;
      if (thr->thr_cv == NULL)
	goto failed;

#ifdef HPUX_ITANIUM64
      if (stack_size > PTHREAD_STACK_MIN)
	{
	  size_t s, rses;
	  pthread_attr_getstacksize (&_thread_attr, &s);
	  pthread_attr_getrsestacksize_np (&_thread_attr, &rses);
	  log_error ("default rses=%d stack=%d : %m", rses, s);
	}
#endif

#ifndef OLD_PTHREADS
# if defined(HAVE_PTHREAD_ATTR_SETSTACKSIZE)
      rc = pthread_attr_setstacksize (&_thread_attr, stack_size);
      if (rc)
	{
	  log_error ("Failed setting the OS thread stack size to %d : %m", stack_size);
	}
# endif
# if defined(HAVE_PTHREAD_ATTR_GETSTACKSIZE)
      if (0 == pthread_attr_getstacksize (&_thread_attr, &os_stack_size))
	{
	  if (os_stack_size > 4 * 8192)
	    stack_size = thr->thr_stack_size = ((unsigned long) os_stack_size) - 4 * 8192;
	}
# endif
# ifdef HPUX_ITANIUM64
      if (stack_size > PTHREAD_STACK_MIN)
	{
	  size_t rsestack_size = stack_size / 2;
	  rc = pthread_attr_setrsestacksize_np (&_thread_attr, rsestack_size);
	  if (rc)
	    {
	      log_error ("Failed setting the OS thread 'rse' stack size to %d (plain stack size set to %d) : %m",
		  rsestack_size, stack_size);
	    }
	  thr->thr_stack_size /= 2;
	}
# endif
      rc = pthread_create ((pthread_t *) thr->thr_handle, &_thread_attr, _thread_boot, thr);
      CKRET (rc);

      /* rc = pthread_detach (*(pthread_t *) thr->thr_handle); */
      /* CKRET (rc); */
#else /* OLD_PTHREADS */
      rc = pthread_attr_setstacksize (&_thread_attr, stack_size);
      CKRET (rc);

      rc = pthread_create ((pthread_t *) thr->thr_handle, _thread_attr, _thread_boot, thr);
      CKRET (rc);

      /* rc = pthread_detach ((pthread_t *) thr->thr_handle); */
      /* CKRET (rc); */
#endif

      _thread_num_total++;
#if 0
      if (DO_LOG (LOG_THR))
	log_info ("THRD_0 OS threads create (%i)", _thread_num_total);
#endif
      thread_set_priority (thr, NORMAL_PRIORITY);
    }
  else
    {
      Q_LOCK ();
      thread_queue_remove (&_deadq, thr);
      _thread_num_dead--;
      Q_UNLOCK ();

      assert (thr->thr_status == DEAD);

      /* Set new context for the thread and resume it */
      thr->thr_initial_function = initial_function;
      thr->thr_initial_argument = initial_argument;
      thr->thr_status = RUNNABLE;
      rc = pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
      CKRET (rc);
      /* if (DO_LOG(LOG_THR))
	log_info ("THRD_3 OS threads reuse. Info threads - total (%ld) wait (%ld) dead (%ld)",
	    _thread_num_total, _thread_num_wait, _thread_num_dead); */
    }

  return thr;

failed:
  if (thr->thr_status == RUNNABLE)
    {
      _thread_free_attributes (thr);
      dk_free (thr, sizeof (thread_t));
    }
  return NULL;
}
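/*
 *  Usage sketch (illustrative only): start a worker with the default stack
 *  size and let it terminate through thread_exit.  The names worker_main and
 *  worker_arg are hypothetical, and the worker signature below assumes
 *  thread_init_func is a void (*) (void *); adjust to the actual typedef.
 */
#if 0
static void
worker_main (void *arg)
{
  /* ... do the work ... */
  thread_exit (0);		/* parks the OS thread on _deadq for reuse */
}

static void
start_worker (void *worker_arg)
{
  thread_t *worker = thread_create (worker_main, 0 /* default stack size */, worker_arg);
  if (worker == NULL)
    {
      /* thread_create returns NULL if the OS thread could not be started */
    }
}
#endif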