/*
 * Block the calling task until the condition is signaled.
 *
 * Classic semaphore-based condition variable (the commented P()/V()
 * pseudo-code shows the textbook algorithm each step implements):
 *   x - binary semaphore guarding the waiter count,
 *   s - semaphore waiters sleep on,
 *   h - handshake semaphore the waker blocks on until we have woken,
 *   m - the monitor lock, held by the caller on entry and on exit.
 *
 * The caller MUST hold c->m when calling this; it is released while
 * sleeping and re-acquired before returning.  Statement order is the
 * algorithm -- do not reorder.
 */
void Condition_Wait( Condition *c ) {
    /* x.P( ); -- lock the waiter counter */
    xSemaphoreTake( c->x, portMAX_DELAY );
    /* waiters++; -- register ourselves before releasing m, so a
     * signaler cannot miss us */
    { c->waiters++; }
    /* x.V( ); */
    xSemaphoreGive( c->x );
    /* m.Release() -- drop the monitor lock while we sleep */
    Lock_Release( c->m );
    /* s.P( ); -- sleep until a signaler posts s */
    xSemaphoreTake( c->s, portMAX_DELAY );
    /* h.V( ); -- handshake: tell the signaler we have consumed the signal */
    xSemaphoreGive( c->h );
    /* m.Acquire( ); -- re-enter the monitor before returning */
    Lock_Acquire( c->m );
}
/*
 * Tear down a producer/consumer queue.
 *
 * Asserts (under the queue lock) that the item list has been fully
 * drained; destroying a non-empty queue is a caller bug.  Note that
 * the lock itself and the PC storage are not freed here -- only the
 * emptiness invariant is checked.
 */
void PC_Destroy(PC *pc) {
    Lock_Acquire(&pc->lock);
    assert(List_IsEmpty(&(pc->list)));
    Lock_Release(&pc->lock);
}
/*
 * Insert an item of the given color at the tail of the bounded queue,
 * blocking while the queue is full.
 *
 * Wakeup policy: consumers are only notified on the empty -> non-empty
 * transition (used == 1), because getters match on the color of the
 * HEAD item, which only changes when the queue was empty.  With a
 * single color (maxColor == 0) any one waiter can take the item, so a
 * Signal suffices; with multiple colors a Broadcast is required so the
 * getter waiting for this particular color re-checks its predicate.
 */
void PC_Put(PC *pc, int color) {
    Lock_Acquire(&pc->lock);
    assert(color <= pc->maxColor);
    /* Wait for a free slot; re-test after every wakeup (Mesa semantics). */
    while(pc->capacity == pc->used){
        pc->waitingP++;
        Cond_Wait(&pc->spaceAvail, &pc->lock);
        pc->waitingP--;
    }
    pc->used++;
    addItem(color, &(pc->list));
    assert(((Item *)List_Last(&(pc->list)))->color == color);
    if(pc->used == 1){
        /*
         * Went from empty to non-empty.  Signal
         */
        if(pc->maxColor == 0){
            Cond_Signal(&pc->stuffAvail, &pc->lock);
        }
        else{
            Cond_Broadcast(&pc->stuffAvail, &pc->lock);
        }
    }
    /* Still under the lock, so the tail invariant must still hold. */
    assert(((Item *)List_Last(&(pc->list)))->color == color);
    Lock_Release(&pc->lock);
}
/* Relinquish the merge lock, if held: release it, drop our refcount,
 * and clear the member so a second call is a harmless no-op. */
static void S_release_merge_lock(Indexer *self) {
    if (self->merge_lock == NULL) {
        return;
    }
    Lock_Release(self->merge_lock);
    DECREF(self->merge_lock);
    self->merge_lock = NULL;
}
/* Relinquish the deletion lock, if held: release, DECREF, and NULL the
 * ivar so repeated calls are safe no-ops. */
static void S_release_deletion_lock(PolyReader *self) {
    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
    if (!ivars->deletion_lock) {
        return;
    }
    Lock_Release(ivars->deletion_lock);
    DECREF(ivars->deletion_lock);
    ivars->deletion_lock = NULL;
}
/* Relinquish the merge lock, if held: release, DECREF, and NULL the
 * ivar so repeated calls are safe no-ops. */
static void S_release_merge_lock(BackgroundMerger *self) {
    BackgroundMergerIVARS *const ivars = BGMerger_IVARS(self);
    if (!ivars->merge_lock) {
        return;
    }
    Lock_Release(ivars->merge_lock);
    DECREF(ivars->merge_lock);
    ivars->merge_lock = NULL;
}
/* Relinquish the merge lock, if held: release, DECREF, and NULL the
 * ivar so repeated calls are safe no-ops. */
static void S_release_merge_lock(Indexer *self) {
    IndexerIVARS *const ivars = Indexer_IVARS(self);
    if (!ivars->merge_lock) {
        return;
    }
    Lock_Release(ivars->merge_lock);
    DECREF(ivars->merge_lock);
    ivars->merge_lock = NULL;
}
/*
 * Initialize a CachedLock, allocating (or sharing) a pool slot.
 *
 * With CACHEDLOCK_FLAG_SHARED the lock takes a free bit in the shared
 * current pool (allocating a fresh pool when the current one is full);
 * otherwise it gets a private pool of its own.  Returns 0 on success,
 * -1 on allocation failure or injected fault.
 *
 * BUG FIX: the original returned -1 from inside the SHARED branch
 * (fault injection, and Pool_New failure) while still holding
 * s_latchPoolLock, deadlocking every subsequent init/destroy.  Both
 * error paths now release the pool lock before returning.
 */
_Return_type_success_(return == 0) int CachedLock_Init_Checked(
    _Out_ CachedLock* self,
    unsigned long flags,
    NitsCallSite cs
)
{
    CachedLock_Pool* pool;
    int index;
    ReadWriteLock temp = READWRITELOCK_INITIALIZER;

    /* One-time initialization. Doesn't matter if called several times. */
    if (s_cpuMask == CPU_MASK_UNINITIALIZED)
        InitializeCachePool();

    if (flags & CACHEDLOCK_FLAG_SHARED)
    {
        Lock_Acquire(&s_latchPoolLock);

        if (NitsShouldFault(cs, NitsAutomatic))
        {
            /* Must not return with s_latchPoolLock held. */
            Lock_Release(&s_latchPoolLock);
            return -1;
        }

        if (s_currentPool == NULL || s_currentPool->mask == POOL_FULL_MASK)
        {
            /* The current pool is full. */
            s_currentPool = Pool_New();
            if (s_currentPool == NULL)
            {
                /* Must not return with s_latchPoolLock held. */
                Lock_Release(&s_latchPoolLock);
                return -1;
            }
        }

        /* Search the pool for a zero bit. */
        pool = s_currentPool;
        for (index = 0; index < POOL_LINES; index++)
            if ((pool->mask & ((ptrdiff_t)1 << index)) == 0)
                break;

        /* Take ownership of this index. */
        pool->mask |= ((ptrdiff_t)1 << index);
        Lock_Release(&s_latchPoolLock);
    }
    else
    {
        /* Private pool: this lock owns slot 0 exclusively. */
        pool = Pool_New();
        if (pool == NULL)
            return -1;

        pool->mask = 1;
        index = 0;
    }

    self->pool = pool;
    self->latches = pool->latches + index;
    self->lock = temp;
    self->master = 0;
    return 0;
}
/*
 * leave(obj[, enter_result]) -> None
 *
 * Release the lock guarding a shared object.  The optional second
 * argument (the value returned by enter()) is accepted and ignored.
 * Returns a new reference to None, or NULL with an exception set if
 * argument parsing fails.
 */
static PyObject * leave(PyObject *self, PyObject *args) {
    PyObject *obj;
    PyObject *ignored = NULL;
    SharedObject *shared;

    /* Expect the object plus an optional (unused) enter() result. */
    if (!PyArg_ParseTuple(args, "O|O", &obj, &ignored)) {
        return NULL;
    }

    /* Unlock the appropriate lock */
    shared = SharedObject_FROM_PYOBJECT(obj);
    Lock_Release(&shared->lock);

    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * Destroy a CachedLock: give its slot back to the owning pool, and
 * free the pool itself once it is empty and no longer the shared
 * current pool (i.e. it has been orphaned).
 */
void CachedLock_Destroy( _Inout_ CachedLock* self ) {
    CachedLock_Pool* pool = self->pool;
    ptrdiff_t slot = self->latches - pool->latches;

    Lock_Acquire(&s_latchPoolLock);

    /* Release ownership of our bit. */
    pool->mask &= ~((ptrdiff_t)1 << slot);

    /* An empty pool that is not the shared current pool is orphaned. */
    if ((pool->mask == 0) && (pool != s_currentPool)) {
        Pool_Delete(pool);
    }

    Lock_Release(&s_latchPoolLock);
}
/*
 * Remove and return the head item, blocking until the head item's
 * color matches the requested color.
 *
 * Getters match strictly on the HEAD of the list, so removal changes
 * which color is available next: after taking an item we Broadcast
 * stuffAvail so the getter whose color is now at the head can proceed,
 * and Signal spaceAvail since one slot was freed for producers.
 */
int PC_Get(PC *pc, int color) {
    Lock_Acquire(&pc->lock);
    assert(color <= pc->maxColor);
    /* Wait until the queue is non-empty AND our color is at the head
     * (Mesa semantics: re-test the predicate after every wakeup). */
    while(List_IsEmpty(&(pc->list)) || (((Item *)List_First(&(pc->list)))->color != color)){
        pc->waitingC++;
        Cond_Wait(&pc->stuffAvail, &pc->lock);
        pc->waitingC--;
    }
    removeItem(color, &(pc->list));
    pc->used--;
    /* One slot freed -- wake a blocked producer. */
    Cond_Signal(&pc->spaceAvail, &pc->lock);
    /*
     * We just took top item off -- another getter may be
     * able to proceed
     */
    Cond_Broadcast(&pc->stuffAvail, &pc->lock);
    Lock_Release(&pc->lock);
    return color;
}
/* Release the monitor lock associated with condition c.  Convenience
 * wrapper so callers need not reach into the Condition struct. */
void Condition_Unlock( Condition *c ) {
    Lock_Release( c-> m );
}
/*
 * Delete index files that are no longer referenced by any snapshot.
 *
 * Best-effort: takes the index-wide deletion lock (skipping the purge
 * entirely with a warning if it cannot be obtained), deletes unused
 * entries in reverse lexical order so directories empty before they
 * are removed, and only deletes a snapshot file once every entry it
 * references was successfully removed.  Failed deletions are simply
 * retried on a later purge.
 */
void FilePurger_purge(FilePurger *self) {
    Lock *deletion_lock = IxManager_Make_Deletion_Lock(self->manager);

    // Obtain deletion lock, purge files, release deletion lock.
    Lock_Clear_Stale(deletion_lock);
    if (Lock_Obtain(deletion_lock)) {
        Folder *folder = self->folder;
        Hash *failures = Hash_new(0);   // set of entries we failed to delete
        VArray *purgables;
        VArray *snapshots;

        S_discover_unused(self, &purgables, &snapshots);

        // Attempt to delete entries -- if failure, no big deal, just try
        // again later.  Proceed in reverse lexical order so that directories
        // get deleted after they've been emptied.
        VA_Sort(purgables, NULL, NULL);
        for (uint32_t i = VA_Get_Size(purgables); i--; ) {
            // NOTE(review): lowercase `VA_fetch` here vs `VA_Fetch`
            // everywhere else -- confirm both spellings exist upstream.
            CharBuf *entry = (CharBuf*)VA_fetch(purgables, i);
            if (Hash_Fetch(self->disallowed, (Obj*)entry)) { continue; }
            if (!Folder_Delete(folder, entry)) {
                if (Folder_Exists(folder, entry)) {
                    Hash_Store(failures, (Obj*)entry, INCREF(&EMPTY));
                }
            }
        }

        for (uint32_t i = 0, max = VA_Get_Size(snapshots); i < max; i++) {
            Snapshot *snapshot = (Snapshot*)VA_Fetch(snapshots, i);
            bool_t snapshot_has_failures = false;
            if (Hash_Get_Size(failures)) {
                // Only delete snapshot files if all of their entries were
                // successfully deleted.
                VArray *entries = Snapshot_List(snapshot);
                for (uint32_t j = VA_Get_Size(entries); j--; ) {
                    CharBuf *entry = (CharBuf*)VA_Fetch(entries, j);
                    if (Hash_Fetch(failures, (Obj*)entry)) {
                        snapshot_has_failures = true;
                        break;
                    }
                }
                DECREF(entries);
            }
            if (!snapshot_has_failures) {
                CharBuf *snapfile = Snapshot_Get_Path(snapshot);
                Folder_Delete(folder, snapfile);
            }
        }

        DECREF(failures);
        DECREF(purgables);
        DECREF(snapshots);
        Lock_Release(deletion_lock);
    }
    else {
        WARN("Can't obtain deletion lock, skipping deletion of "
             "obsolete files");
    }

    DECREF(deletion_lock);
}