//** Dummy submit hook: hands the op to the global dummy-executor thread.
//** The op is pushed onto gd_stack under gd_lock and the worker is woken via
//** gd_cond; everything after the first `return` is intentionally dead code
//** kept for reference (the old inline-completion path).
void _gop_dummy_submit_op(void *arg, op_generic_t *op)
{
    int dolock = 0;

    log_printf(15, "gid=%d\n", gop_id(op));

    // if (op->base.cb != NULL) { //** gop is on a q
    //** Queue the op for the dummy worker thread and wake it.
    apr_thread_mutex_lock(gd_lock);
    push(gd_stack, op);
    apr_thread_cond_signal(gd_cond);
    apr_thread_mutex_unlock(gd_lock);
    return;
    // }

    //*-------* This isn't executed below -----------
    //** Old path: complete the op inline. trylock detects whether the caller
    //** already holds the gop lock so we can re-take it before returning.
    if (apr_thread_mutex_trylock(op->base.ctl->lock) != APR_SUCCESS) dolock = 1;
    unlock_gop(op);

    //log_printf(15, "dolock=%d gid=%d err=%d APR_SUCCESS=%d\n", dolock, gop_id(op), err, APR_SUCCESS);
    op->base.started_execution = 1;
    gop_mark_completed(op, op->base.status);

    //** Restore the caller's lock state if they held it on entry.
    if (dolock == 1) {
        lock_gop(op);
    } //** lock_gop is a macro so need the {}

    return;
}
// Scoped lock for the global call-stack log. Tries to take the shared mutex a
// handful of times without blocking; on repeated contention it gives up and
// leaves both mLocked and mOK false (the log access is then skipped).
CallStacksLogLock::CallStacksLogLock()
:   mLocked(false),
    mOK(false)
{
    if (!gCallStacksLogMutexp)
    {
        // No mutex has been created yet: nothing to lock, but the object is
        // still considered usable.
        mOK = true;
        return;
    }

    const int MAX_RETRIES = 5;
    int attempts = 0;
    while (attempts < MAX_RETRIES)
    {
        apr_status_t s = apr_thread_mutex_trylock(gCallStacksLogMutexp);
        if (!APR_STATUS_IS_EBUSY(s))
        {
            // Any status other than EBUSY is treated as "lock acquired".
            mLocked = true;
            mOK = true;
            return;
        }
        // Somebody else holds it; back off briefly and retry.
        ms_sleep(1);
        ++attempts;
    }

    // We're hosed, we can't get the mutex. Blah.
    std::cerr << "CallStacksLogLock::CallStacksLogLock: failed to get mutex for log" << std::endl;
}
/* Test driver: exercise a condition-variable wait against a NESTED mutex.
 * Spawns thread_routine (running fnptr->func via the toolbox), signals the
 * condvar, joins, then verifies the child left the nested mutex held by
 * checking that trylock reports EBUSY — twice, since a nested mutex held by
 * ANOTHER thread must stay busy on repeated attempts. */
static void nested_wait(abts_case *tc, void *data)
{
    toolbox_fnptr_t *fnptr = data;
    toolbox_t box;
    apr_status_t rv, retval;
    apr_thread_cond_t *cond = NULL;
    apr_thread_t *thread = NULL;
    apr_thread_mutex_t *mutex = NULL;

    /* NESTED (recursive) mutex: same-thread relocks succeed. */
    rv = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_NESTED, p);
    ABTS_SUCCESS(rv);
    ABTS_PTR_NOTNULL(tc, mutex);

    rv = apr_thread_cond_create(&cond, p);
    ABTS_SUCCESS(rv);
    ABTS_PTR_NOTNULL(tc, cond);

    /* Hold the mutex while the worker starts so it blocks until we're ready. */
    rv = apr_thread_mutex_lock(mutex);
    ABTS_SUCCESS(rv);

    box.tc = tc;
    box.cond = cond;
    box.mutex = mutex;
    box.func = fnptr->func;

    rv = apr_thread_create(&thread, NULL, thread_routine, &box, p);
    ABTS_SUCCESS(rv);

    rv = apr_thread_mutex_unlock(mutex);
    ABTS_SUCCESS(rv);

    /* yield the processor */
    /* NOTE(review): sleep-then-signal is a timing workaround, not a
     * guarantee that the worker reached cond_wait — inherently racy. */
    apr_sleep(500000);

    rv = apr_thread_cond_signal(cond);
    ABTS_SUCCESS(rv);

    rv = apr_thread_join(&retval, thread);
    ABTS_SUCCESS(rv);

    /* Worker exited holding the mutex: trylock from this thread must report
     * EBUSY, and stay EBUSY on a second attempt. */
    rv = apr_thread_mutex_trylock(mutex);
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_EBUSY(rv));

    rv = apr_thread_mutex_trylock(mutex);
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_EBUSY(rv));
}
bool LLMutexImpl::try_lock() { apr_status_t status = apr_thread_mutex_trylock(mMutexImpl); if(APR_STATUS_IS_EBUSY(status)) return false; APRExceptionThrower(status); return true; }
//** Walk the LRU stack from the bottom (coldest pages first) and free clean,
//** unused pages until bytes_to_free is reached or a page that cannot be
//** dropped (dirty, in use, or its segment lock is contended) stops the scan.
//** Caller is assumed to hold pseg's lock and cp's structures; pages of other
//** segments are only touched if their lock is acquired via trylock.
//** Returns the number of bytes still outstanding (0 if the goal was met).
int _lru_free_mem(cache_t *c, segment_t *pseg, ex_off_t bytes_to_free)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    cache_page_t *p;
    page_lru_t *lp;
    Stack_ele_t *ele;
    apr_thread_mutex_t *plock;
    ex_off_t total_bytes, pending_bytes;
    int gotlock, count, bits, err;

    total_bytes = 0;
    err = 0;

    log_printf(15, "START seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " stack_size=%d\n", segment_id(pseg), bytes_to_free, cp->bytes_used, stack_size(cp->stack));

    move_to_bottom(cp->stack);
    ele = get_ptr(cp->stack);
    while ((total_bytes < bytes_to_free) && (ele != NULL) && (err == 0)) {
        p = (cache_page_t *)get_stack_ele_data(ele);
        lp = (page_lru_t *)p->priv;
        plock = p->seg->lock;
        //** Non-blocking lock; pages of pseg itself are processed even if the
        //** trylock failed (its lock is presumed already held by the caller).
        gotlock = apr_thread_mutex_trylock(plock);
        if ((gotlock == APR_SUCCESS) || (p->seg == pseg)) {
            bits = atomic_get(p->bit_fields);
            if ((bits & C_TORELEASE) == 0) {  //** Skip it if already flagged for removal
                //** Page is only droppable if no reader/writer/flusher holds it.
                count = atomic_get(p->access_pending[CACHE_READ]) + atomic_get(p->access_pending[CACHE_WRITE]) + atomic_get(p->access_pending[CACHE_FLUSH]);
                if (count == 0) { //** No one is using it
                    s = (cache_segment_t *)p->seg->priv;
                    if ((bits & C_ISDIRTY) == 0) {  //** Don't have to flush it
                        total_bytes += s->page_size;
                        log_printf(15, "lru_free_mem: freeing page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        list_remove(s->pages, &(p->offset), p); //** Have to do this here cause p->offset is the key var
                        delete_current(cp->stack, 1, 0);
                        //** Release both page buffers plus the LRU bookkeeping node.
                        if (p->data[0].ptr) free(p->data[0].ptr);
                        if (p->data[1].ptr) free(p->data[1].ptr);
                        free(lp);
                    } else {         //** Got to flush the page first
                        err = 1;
                    }
                } else {
                    err = 1;
                }
            }

            //** Only drop the segment lock if WE took it above.
            if (gotlock == APR_SUCCESS) apr_thread_mutex_unlock(plock);
        } else {
            err = 1;
        }

        //** Advance only while still under target and error-free; delete_current
        //** above already repositioned the stack cursor for us.
        if ((total_bytes < bytes_to_free) && (err == 0)) ele = get_ptr(cp->stack);
    }

    cp->bytes_used -= total_bytes;
    pending_bytes = bytes_to_free - total_bytes;
    log_printf(15, "END seg=" XIDT " bytes_to_free=" XOT " pending_bytes=" XOT " bytes_used=" XOT "\n", segment_id(pseg), bytes_to_free, pending_bytes, cp->bytes_used);

    return(pending_bytes);
}
// non-blocking, but does do a lock/unlock so not free bool LLMutexBase::isLocked() const { if (mLockingThread.equals_current_thread_inline()) return false; // A call to lock() won't block. if (APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp))) return true; apr_thread_mutex_unlock(mAPRMutexp); return false; }
bool LLMutex::isLocked() { apr_status_t status = apr_thread_mutex_trylock(mAPRMutexp); if (APR_STATUS_IS_EBUSY(status)) { return true; } else { apr_thread_mutex_unlock(mAPRMutexp); return false; } }
bool LLMutexBase::tryLock() { if (mLockingThread.equals_current_thread_inline()) { //redundant lock mCount++; return true; } bool success = !APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp)); if (success) { mLockingThread.reset_inline(); } return success; }
/* Attempt to take the protection mutex of |self| without blocking.
 * Returns true when the lock was acquired; false when the object is NULL,
 * the mutex is busy, or the build has no thread support. */
bool y_try_lock (void * self)
{
#if APR_HAS_THREADS
    y_Object * obj = y_OBJECT (self);

    if ( ! obj )
    {
        return false;
    }

    /* EBUSY means another thread holds it; anything else counts as acquired. */
    {
        apr_status_t status = apr_thread_mutex_trylock (obj->protect->mutex);
        return ! APR_STATUS_IS_EBUSY (status);
    }
#else
    return false;
#endif /* APR_HAS_THREADS */
}
void LLMutexBase::lock() { if (mLockingThread.equals_current_thread_inline()) { //redundant lock mCount++; return; } if (APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp))) { if (AIThreadID::in_main_thread_inline()) { LLFastTimer ft1(FT_WAIT_FOR_MUTEX); apr_thread_mutex_lock(mAPRMutexp); } else { apr_thread_mutex_lock(mAPRMutexp); } } mLockingThread.reset_inline(); }
/* Non-blocking lock attempt on |lock|.
 * Thin pass-through to APR's trylock; the APR status is returned to the
 * caller as a switch status. */
SWITCH_DECLARE(switch_status_t) switch_mutex_trylock(switch_mutex_t *lock)
{
    apr_status_t rc;

    rc = apr_thread_mutex_trylock(lock);
    return rc;
}
/**
 * etch_apr_queue_trylock()
 * added by Cisco to access lock externally
 * Non-blocking attempt to take the queue's internal mutex; returns the APR
 * status as an int (APR_SUCCESS on acquisition, APR_EBUSY when contended).
 */
int etch_apr_queue_trylock(etch_apr_queue_t *queue)
{
    apr_status_t rc = apr_thread_mutex_trylock(queue->one_big_mutex);
    return (int) rc;
}
/* Non-blocking acquire of a process mutex backed by a thread mutex.
 * Returns APR_ENOLOCK for a NULL handle; otherwise the trylock status. */
APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
{
    if (!mutex)
    {
        return APR_ENOLOCK;
    }
    return apr_thread_mutex_trylock(mutex->mutex);
}