/*!	Removes and returns one slot from the queue's free list.
	Blocks on fFreeSlotCondition until at least one slot is available.
	Interrupts are disabled for the duration via InterruptsLocker; the lock
	is dropped around the actual wait.
*/
PhysicalPageSlot*
PhysicalPageSlotQueue::GetSlot()
{
	InterruptsLocker locker;

	// Block until the free list becomes non-empty.
	while (fSlots == NULL) {
		ConditionVariableEntry waitEntry;
		fFreeSlotCondition.Add(&waitEntry);
		locker.Unlock();
		waitEntry.Wait();
		locker.Lock();
	}

	// Pop the head of the free list.
	PhysicalPageSlot* head = fSlots;
	fSlots = head->next;
	return head;
}
/*!	Removes two slots from the queue's free list, returning them via
	\a slot1 and \a slot2.
	Blocks on fFreeSlotsCondition until at least two slots are available.
	Interrupts are disabled for the duration via InterruptsLocker; the lock
	is dropped around the actual wait.
*/
void
PhysicalPageSlotQueue::GetSlots(PhysicalPageSlot*& slot1,
	PhysicalPageSlot*& slot2)
{
	InterruptsLocker locker;

	// Block until the free list holds at least two entries.
	while (fSlots == NULL || fSlots->next == NULL) {
		ConditionVariableEntry waitEntry;
		fFreeSlotsCondition.Add(&waitEntry);
		locker.Unlock();
		waitEntry.Wait();
		locker.Lock();
	}

	// Pop the first two slots off the free list.
	slot1 = fSlots;
	slot2 = slot1->next;
	fSlots = slot2->next;
}
/*!	Thread body of the object cache maintenance thread.
	Sleeps on sMaintenanceCondition until a cache is queued on
	sMaintenanceQueue (performing memory manager maintenance opportunistically
	while idle), then services the queued cache's pending resize/delete
	requests until none remain.
	Never returns.
*/
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				// re-check the queue after having dropped the lock
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		// Process requests for this cache until no flags remain set. The
		// maintenance_* flags are read/written with sMaintenanceLock held.
		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				// nothing (more) to do for this cache
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			// drop the maintenance lock while doing the actual work
			locker.Unlock();

			if (deleteRequested) {
				// the cache is gone after this call; don't touch it again
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					// NOTE(review): on this path maintenance_pending and
					// maintenance_in_progress remain set on the cache —
					// confirm whether later requests for it get stalled.
					break;
				}
			}

			// NOTE(review): sMaintenanceLock is re-acquired here while
			// cache->lock is still held (cacheLocker releases it at scope
			// end) — verify this lock order is used consistently elsewhere.
			locker.Lock();
		}
	}

	// never can get here
	return B_OK;
}
/*!	Allocates a \c port_message with \a bufferSize bytes of payload from the
	port heap, enforcing the global space quota and growing the heap by a new
	kernel area when it is exhausted.

	If the quota (kTotalSpaceLimit) is reached, the function waits on
	sNoSpaceCondition — honoring \a flags and \a timeout — until space is
	freed. Heap growth is serialized through the sAllocatingArea flag so only
	one thread creates an area at a time.

	\param code Message code stored into the allocated message.
	\param bufferSize Payload size; the allocation is
		sizeof(port_message) + bufferSize.
	\param flags Wait flags (e.g. B_RELATIVE_TIMEOUT), forwarded to the wait.
	\param timeout Wait timeout, forwarded to the wait.
	\param _message On success, set to the allocated message.
	\return \c B_OK on success, \c B_WOULD_BLOCK / \c B_TIMED_OUT when waiting
		for quota space fails, \c B_NO_MEMORY if the heap cannot be grown.
*/
static status_t
get_port_message(int32 code, size_t bufferSize, uint32 flags,
	bigtime_t timeout, port_message** _message)
{
	size_t size = sizeof(port_message) + bufferSize;
	bool limitReached = false;

	while (true) {
		// Tentatively reserve our size in the global quota. Every path below
		// that does not return an allocated message must release this
		// reservation again.
		if (atomic_add(&sTotalSpaceInUse, size)
				> int32(kTotalSpaceLimit - size)) {
			// TODO: add per team limit
			// We are not allowed to create another heap area, as our
			// space limit has been reached - just wait until we get
			// some free space again.
			limitReached = true;

	wait:
			MutexLocker locker(sPortsLock);

			// release our reservation while waiting
			atomic_add(&sTotalSpaceInUse, -size);

			// TODO: we don't want to wait - but does that also mean we
			// shouldn't wait for the area creation?
			if (limitReached && (flags & B_RELATIVE_TIMEOUT) != 0
				&& timeout <= 0)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			sNoSpaceCondition.Add(&entry);

			locker.Unlock();

			status_t status = entry.Wait(flags, timeout);
			if (status == B_TIMED_OUT)
				return B_TIMED_OUT;

			// just try again
			limitReached = false;
			continue;
		}

		int32 areaChangeCounter = atomic_get(&sAreaChangeCounter);

		// Quota is fulfilled, try to allocate the buffer
		port_message* message
			= (port_message*)heap_memalign(sPortAllocator, 0, size);
		if (message != NULL) {
			message->code = code;
			message->size = bufferSize;

			*_message = message;
			return B_OK;
		}

		// The heap is exhausted — only one thread may grow it at a time.
		if (atomic_or(&sAllocatingArea, 1) != 0) {
			// Just wait for someone else to create an area for us
			goto wait;
		}

		if (areaChangeCounter != atomic_get(&sAreaChangeCounter)) {
			// Someone else grew the heap between our allocation attempt and
			// our claiming sAllocatingArea. Release the allocator flag (we
			// just set it above — leaving it set would make every later
			// failed allocation wait forever) and our reservation, then
			// retry; the loop top re-reserves.
			atomic_and(&sAllocatingArea, 0);
			atomic_add(&sTotalSpaceInUse, -size);
			continue;
		}

		// Create a new area for the heap to use

		addr_t base;
		area_id area = create_area("port grown buffer", (void**)&base,
			B_ANY_KERNEL_ADDRESS, kBufferGrowRate, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			// Growing the heap failed: clear the allocator flag (otherwise
			// all future failed allocations would wait for us forever) and
			// release our quota reservation before bailing out.
			atomic_and(&sAllocatingArea, 0);
			atomic_add(&sTotalSpaceInUse, -size);

			// it's time to let the userland feel our pain
			sNoSpaceCondition.NotifyAll();
			return B_NO_MEMORY;
		}

		heap_add_area(sPortAllocator, area, base, kBufferGrowRate);

		atomic_add(&sAreaChangeCounter, 1);
		sNoSpaceCondition.NotifyAll();
		atomic_and(&sAllocatingArea, 0);

		// Release our reservation before retrying — the loop top reserves
		// again; without this the size would be accounted twice on success.
		atomic_add(&sTotalSpaceInUse, -size);
	}
}