/* Restart all threads that are waiting on the condition variable */
/*
 * Wake every thread currently blocked on this condition variable.
 *
 * This is the classic two-semaphore condition-variable scheme (the same
 * design as SDL's generic implementation): waiters block on `wait_sem`,
 * and after being released each waiter posts `wait_done` so the
 * broadcaster can confirm the handshake completed before returning.
 *
 * Returns FALSE only when `cond` is NULL, TRUE otherwise.
 */
bool_t CC_CALL cc_condition_broadcast(cc_condition_t *cond) {
    if ( ! cond ) {
        CC_ERROR_LOG(_T("Passed a NULL condition variable"));
        return FALSE;
    }
    /* If there are waiting threads not already signalled, then
       signal the condition and wait for the thread to respond. */
    cc_mutex_lock(cond->lock);
    if ( cond->waiting > cond->signals ) {
        int i, num_waiting;
        /* Only the not-yet-signalled waiters need a post; mark them all
           as signalled before releasing any of them. */
        num_waiting = (cond->waiting - cond->signals);
        cond->signals = cond->waiting;
        for ( i=0; i < num_waiting; ++i ) {
            cc_semaphore_post(cond->wait_sem);
        }
        /* Now all released threads are blocked here, waiting for us.
           Collect them all (and win fabulous prizes!) :-) */
        cc_mutex_unlock(cond->lock);
        for ( i=0; i < num_waiting; ++i ) {
            cc_semaphore_wait(cond->wait_done);
        }
    } else {
        cc_mutex_unlock(cond->lock);
    }
    return TRUE;
}
/*
 * Block on the condition variable, releasing `mutex` while waiting and
 * re-acquiring it before returning (standard condition-variable
 * semantics).  `ms` is the timeout in milliseconds; CC_MUTEX_MAXWAIT
 * means wait forever.
 *
 * Returns FALSE when `cond` is NULL; otherwise TRUE/FALSE derived from
 * the underlying semaphore wait (`retval >= 0`).
 *
 * NOTE(review): `retval` mixes return conventions.  cc_semaphore_wait /
 * cc_semaphore_wait_timeout appear to return TRUE/FALSE (bool_t), yet
 * the `retval > 0` test below reads like the SDL original where the
 * timed wait returned SDL_MUTEX_TIMEDOUT (> 0) on *timeout*.  If
 * TRUE == 1 here, a *successful* wait would also take the "eat a
 * condition signal" branch -- verify the actual values of TRUE/FALSE
 * and CC_MUTEX_TIMEDOUT against the semaphore implementation.
 */
bool_t CC_CALL cc_condition_wait_timeout(cc_condition_t *cond, cc_mutex_t *mutex, uint32_t ms) {
    int retval = FALSE;
    if ( ! cond ) {
        CC_ERROR_LOG(_T("Passed a NULL condition variable"));
        return FALSE;
    }
    /* Obtain the protection mutex, and increment the number of waiters.
       This allows the signal mechanism to only perform a signal if there
       are waiting threads. */
    cc_mutex_lock(cond->lock);
    ++cond->waiting;
    cc_mutex_unlock(cond->lock);
    /* Unlock the mutex, as is required by condition variable semantics */
    cc_mutex_unlock(mutex);
    /* Wait for a signal */
    if ( ms == CC_MUTEX_MAXWAIT ) {
        retval = cc_semaphore_wait(cond->wait_sem);
    } else {
        retval = cc_semaphore_wait_timeout(cond->wait_sem, ms);
    }
    /* Let the signaler know we have completed the wait, otherwise
       the signaler can race ahead and get the condition semaphore
       if we are stopped between the mutex unlock and semaphore wait,
       giving a deadlock.  See the following URL for details:
       http://www-classic.be.com/aboutbe/benewsletter/volume_III/Issue40.html */
    cc_mutex_lock(cond->lock);
    if ( cond->signals > 0 ) {
        /* If we timed out, we need to eat a condition signal */
        if ( retval > 0 ) {
            cc_semaphore_wait(cond->wait_sem);
        }
        /* We always notify the signal thread that we are done */
        cc_semaphore_post(cond->wait_done);
        /* Signal handshake complete */
        --cond->signals;
    }
    --cond->waiting;
    cc_mutex_unlock(cond->lock);
    /* Lock the mutex, as is required by condition variable semantics */
    cc_mutex_lock(mutex);
    return retval >= 0 ? TRUE : FALSE;
}
/*
 * Decrement the semaphore, waiting at most `timeout` milliseconds for
 * the count to become non-zero.  A timeout of 0 degenerates to
 * cc_semaphore_try_wait().
 *
 * Returns -1 for a NULL semaphore; otherwise the last value produced by
 * cc_condition_wait_timeout() (or cc_semaphore_try_wait() for the
 * zero-timeout path).
 *
 * NOTE(review): the loop exit test compares against CC_MUTEX_TIMEDOUT
 * while cc_condition_wait_timeout() only ever returns TRUE/FALSE, and
 * the decrement is gated on `retval == TRUE`.  This only terminates on
 * timeout if CC_MUTEX_TIMEDOUT happens to equal FALSE/TRUE's value --
 * confirm the macro definitions; the return-value conventions of this
 * module look inconsistent (try_wait returns 0 on success, this path
 * returns TRUE on success).
 */
int CC_CALL cc_semaphore_wait_timeout(cc_semaphore_t *sem, uint32_t timeout) {
    int retval;
    if ( ! sem ) {
        CC_ERROR_LOG(_T("Passed a NULL semaphore"));
        return -1;
    }
    /* A timeout of 0 is an easy case */
    if ( timeout == 0 ) {
        return cc_semaphore_try_wait(sem);
    }
    cc_mutex_lock(sem->count_lock);
    ++sem->waiters_count;
    retval = 0;
    /* Re-check the count after every wakeup: the condition wait can
       return spuriously relative to the predicate. */
    while (sem->count == 0 && retval != CC_MUTEX_TIMEDOUT) {
        retval = cc_condition_wait_timeout(sem->count_nonzero, sem->count_lock,timeout);
    }
    --sem->waiters_count;
    if(retval == TRUE){
        --sem->count;
    }
    cc_mutex_unlock(sem->count_lock);
    return retval;
}
/*
 * Initialize the global sync-object machinery and preallocate `count`
 * cckit_sync_t entries on the released (free) queue.
 *
 * `sync_queue_lock` doubles as the "already initialized" flag: if it is
 * non-NULL this call is a no-op returning TRUE.
 *
 * Returns FALSE only when the queue mutex cannot be created.
 *
 * Fix: the original pushed `&d->base` without checking the cc_malloc()
 * result, dereferencing NULL on allocation failure.  Preallocation is
 * best-effort, so on OOM we simply stop growing the pool.
 */
bool_t CC_CALL cckit_init_sync(int32_t count) {
    int32_t i = 0;
    if (sync_queue_lock) {
        return TRUE;
    }
    cc_double_link_cleanup(&sync_queue_actived);
    cc_double_link_cleanup(&sync_queue_released);
    cc_double_link_cleanup(&sync_data_released);
    sync_queue_lock = cc_create_mutex();
    if (sync_queue_lock == NULL) {
        return FALSE;
    }
    cc_mutex_lock(sync_queue_lock);
    for (i = 0; i < count; i++) {
        cckit_sync_t *d = (cckit_sync_t *)cc_malloc(sizeof(cckit_sync_t));
        if (d == NULL) {
            /* Out of memory: keep whatever was preallocated so far.
               Consumers can still allocate on demand later. */
            break;
        }
        cc_double_link_push(&sync_queue_released, &d->base);
    }
    cc_mutex_unlock(sync_queue_lock);
    return TRUE;
}
/*
 * Acquire a data buffer for the caller, writing it through `*b` and also
 * returning it.  If `*b` is already set, it is returned unchanged.
 * Otherwise a buffer is recycled from the idle list when available, or
 * freshly allocated; the acquired buffer is reset (length = 0) and moved
 * onto the active list.
 *
 * Returns NULL if `b` is NULL or allocation fails.
 *
 * Fix: the original guard `if (b && *b)` let a NULL `b` fall through and
 * get dereferenced (`*b = ...`) below; reject it up front.
 */
cckit_data_buffer_t* CC_CALL cckit_data_buffer_push(cckit_data_buffer_t **b) {
    cc_double_iterator_t* base = NULL;
    if (b == NULL) {
        return NULL;
    }
    if (*b) {
        /* Caller already holds a buffer; hand it back untouched. */
        return (cckit_data_buffer_t*)*b;
    }
    /* The lock may legitimately be absent before global init. */
    if (_global_cckit.buffer_lock) {
        cc_mutex_lock(_global_cckit.buffer_lock);
    }
    base = cc_double_link_pop_front(&_global_cckit.buffer_idle);
    if (base) {
        /* Recycle an idle buffer. */
        *b = cc_upcast(base, cckit_data_buffer_t, base);
    } else {
        *b = (cckit_data_buffer_t *)cc_malloc(sizeof(cckit_data_buffer_t));
    }
    if (*b) {
        (*b)->length = 0;
        cc_double_link_push_back(&_global_cckit.buffer_actived, &(*b)->base);
    }
    if (_global_cckit.buffer_lock) {
        cc_mutex_unlock(_global_cckit.buffer_lock);
    }
    return *b;
}
/* Returns the current count of the semaphore */
/*
 * Take a consistent snapshot of the semaphore's count under its lock.
 * A NULL semaphore yields 0.
 */
uint32_t CC_CALL cc_semaphore_value(cc_semaphore_t *sem) {
    int snapshot = 0;
    if (!sem) {
        return (uint32_t)snapshot;
    }
    cc_mutex_lock(sem->count_lock);
    snapshot = sem->count;
    cc_mutex_unlock(sem->count_lock);
    return (uint32_t)snapshot;
}
/*
 * Non-blocking semaphore decrement.
 *
 * Returns 0 when the count was positive and has been decremented;
 * CC_MUTEX_TIMEDOUT when the semaphore is NULL or the count is 0.
 *
 * Fix: the original returned `(uint32_t)ret` from a function declared
 * to return `int` -- a misleading (and, if CC_MUTEX_TIMEDOUT is
 * negative, value-mangling on non-two's-complement targets) cast.
 * Return the int directly.
 */
int CC_CALL cc_semaphore_try_wait(cc_semaphore_t *sem) {
    int ret = CC_MUTEX_TIMEDOUT;
    if ( sem ) {
        cc_mutex_lock(sem->count_lock);
        if(sem->count > 0) {
            --sem->count;
            ret = 0;
        }
        cc_mutex_unlock(sem->count_lock);
    }
    return ret;
}
/* Restart one of the threads that are waiting on the condition variable */
/*
 * Wake exactly one thread blocked on the condition variable, if any.
 *
 * Mirrors cc_condition_broadcast(): post `wait_sem` to release one
 * waiter, then block on `wait_done` (outside the lock) until that
 * waiter acknowledges -- this handshake prevents the signaler racing
 * ahead of a waiter that was preempted between its mutex unlock and
 * its semaphore wait.
 *
 * Returns FALSE only when `cond` is NULL, TRUE otherwise.
 */
bool_t CC_CALL cc_condition_signal(cc_condition_t *cond) {
    if ( ! cond ) {
        CC_ERROR_LOG(_T("Passed a NULL condition variable"));
        return FALSE;
    }
    /* If there are waiting threads not already signalled, then
       signal the condition and wait for the thread to respond. */
    cc_mutex_lock(cond->lock);
    if ( cond->waiting > cond->signals ) {
        ++cond->signals;
        cc_semaphore_post(cond->wait_sem);
        cc_mutex_unlock(cond->lock);
        /* Wait for the released thread to complete the handshake. */
        cc_semaphore_wait(cond->wait_done);
    } else {
        cc_mutex_unlock(cond->lock);
    }
    return TRUE;
}
/*
 * Increment the semaphore count, waking one waiter if any thread is
 * blocked in cc_semaphore_wait_timeout().
 *
 * The condition is signalled while `count_lock` is held and *before*
 * the increment; the woken waiter cannot re-examine `count` until it
 * re-acquires `count_lock` after we release it below, so it always
 * observes the incremented value.
 *
 * Returns FALSE only when `sem` is NULL, TRUE otherwise.
 */
bool_t CC_CALL cc_semaphore_post(cc_semaphore_t *sem) {
    if ( ! sem ) {
        CC_ERROR_LOG(_T("Passed a NULL semaphore"));
        return FALSE;
    }
    cc_mutex_lock(sem->count_lock);
    /* Only bother signalling when someone is actually waiting. */
    if(sem->waiters_count > 0)
        cc_condition_signal(sem->count_nonzero);
    ++sem->count;
    cc_mutex_unlock(sem->count_lock);
    return TRUE;
}
/* Free the semaphore */
/*
 * Destroy a semaphore created by the matching constructor and NULL the
 * caller's pointer (via safe_free).
 *
 * Shutdown protocol: force the count to a huge value so no new waiter
 * blocks, then repeatedly signal until every thread currently inside
 * cc_semaphore_wait_timeout() has drained out, and only then tear down
 * the condition variable and the lock (briefly acquiring the lock to
 * ensure no thread is still inside a critical section).
 *
 * Fix: the original dereferenced `sem` without a NULL check, unlike
 * every other entry point in this module; a NULL `sem` now returns
 * harmlessly.
 */
void CC_CALL cc_destroy_semaphore(cc_semaphore_t **sem) {
    if ( sem == NULL || *sem == NULL ) {
        return;
    }
    /* Unblock current and future waiters. */
    (*sem)->count = 0xffffffff;
    while ((*sem)->waiters_count > 0) {
        cc_condition_signal((*sem)->count_nonzero);
        cc_sleep(10);
    }
    cc_destroy_condition(&(*sem)->count_nonzero);
    if((*sem)->count_lock) {
        /* Make sure nobody is inside the count critical section. */
        cc_mutex_lock((*sem)->count_lock);
        cc_mutex_unlock((*sem)->count_lock);
        cc_destroy_mutex(&(*sem)->count_lock);
    }
    safe_free((*sem));
}
/*
 * Release a buffer obtained from cckit_data_buffer_push(): move it from
 * the active list back onto the idle list and clear the caller's
 * pointer.
 *
 * Returns FALSE when `b` or `*b` is NULL, TRUE on success.
 */
bool_t CC_CALL cckit_data_buffer_pop(cckit_data_buffer_t **b) {
    cc_mutex_t *guard;
    if (!b || !*b) {
        return FALSE;
    }
    /* The lock may legitimately be absent before global init. */
    guard = _global_cckit.buffer_lock;
    if (guard) {
        cc_mutex_lock(guard);
    }
    /* Recycle the node: active -> idle. */
    cc_double_link_swap(&_global_cckit.buffer_actived,
                        &_global_cckit.buffer_idle,
                        &(*b)->base);
    *b = NULL;
    if (guard) {
        cc_mutex_unlock(guard);
    }
    return TRUE;
}
/* Release the library-wide global mutex (counterpart of the global
   lock call); the unlock result is deliberately ignored. */
void cc_mutex_global_unlock(void) { (void) cc_mutex_unlock(cc_global_mutex); }
/*
 * Register a callback to be run at library cleanup time (invoked
 * explicitly via coin_atexit_cleanup(), not via the C library's
 * atexit() -- see the comment below for why).
 *
 * `name` is copied; `priority` and the insertion counter `cnt` together
 * determine cleanup ordering.
 *
 * Fix: the original never checked the malloc()/strdup() results before
 * writing through them; on OOM the registration is now silently skipped
 * instead of dereferencing NULL.
 */
void coin_atexit_func(const char * name, coin_atexit_f * f, coin_atexit_priorities priority)
{
#ifdef COIN_THREADSAFE
  /* This function being not mt-safe seemed to be the only cause of
     problems when constructing SoNode-derived classes in parallel threads.
     So for that extra bit of undocumented, unofficial, under-the-table
     mt-safety, this should take care of it. */
  /* Need this test, since the thread system calls coin_atexit before
     tidbits is initialized. */
  if (atexit_list_monitor) {
    cc_mutex_lock(atexit_list_monitor);
  }
#endif /* COIN_THREADSAFE */
  assert(!isexiting && "tried to attach an atexit function while exiting");
  if (atexit_list == NULL) {
    atexit_list = cc_list_construct();
    /* The atexit() registration was disabled, since it has proved
       dangerous to let the C library trigger the callbacks.

       There is for instance the known problem with deallocating
       resources from a DLL we're using (like OpenAL), as the DLL could
       already have been "offloaded" or simply cleaned up / cleaned out
       when our callback triggers.

       We therefore now force cleanup at exit to be done explicitly
       from application code by invoking the SoDB::finish() method,
       which then invokes the coin_atexit_cleanup() method.

       Note that this scheme is not to be considered a temporary
       workaround -- application-exit clean-up should be done
       explicitly by invoking SoDB::finish().

       mortene.
    */
    /* (void)atexit(coin_atexit_cleanup); */
  }
  {
    tb_atexit_data * data = malloc(sizeof(tb_atexit_data));
    if (data != NULL) {
      data->name = strdup(name);
      if (data->name != NULL) {
        data->func = f;
        data->priority = priority;
        data->cnt = cc_list_get_length(atexit_list);
        cc_list_append(atexit_list, data);
      }
      else {
        /* strdup() failed: abandon this registration rather than store
           a NULL name the cleanup pass would later dereference. */
        free(data);
      }
    }
  }
#ifdef COIN_THREADSAFE
  if (atexit_list_monitor) {
    cc_mutex_unlock(atexit_list_monitor);
  }
#endif /* COIN_THREADSAFE */
}