// Consumer worker: repeatedly removes one item from the shared ring buffer
// (ItemBuffer / QueueStartOffset / QueueSize, guarded by ItemBufferLock).
// Runs until StopRequested is set AND the queue has been fully drained, so
// no produced item is lost on shutdown.
// NOTE(review): assumes ItemBufferNotEmpty/ItemBufferNotFull are condition
// variables paired with ItemBufferLock — confirm against their declarations.
void ConsumerThread(int id)
{
    LOG("Starting consumer thread " << id);
    for(;;) {
        {
            // RAII lock over all queue-state reads/writes in this iteration.
            Mutex::Lock __(ItemBufferLock);

            // Wait in a loop: re-check the predicate after every wakeup to
            // tolerate spurious returns from Wait().
            while(QueueSize == 0 && !StopRequested)
                ItemBufferNotEmpty.Wait(ItemBufferLock);

            // Exit only when stopping AND empty — items queued before the
            // stop request are still consumed.
            if(StopRequested && QueueSize == 0)
                break;

            // Pop one item from the head of the circular buffer.
            int Item = ItemBuffer[QueueStartOffset];
            QueueSize--;
            QueueStartOffset++;
            TotalItemsConsumed++;
            if(QueueStartOffset == BUFFER_SIZE)  // wrap around
                QueueStartOffset = 0;

            LOG("Consumer " << id << ", item " << Item << ", queue size " << QueueSize);
        }
        // Signal after releasing the lock so the woken producer can acquire
        // it immediately.
        ItemBufferNotFull.Signal();
        Sleep(Random(CONSUMER_SLEEP_TIME_MS));  // simulate per-item work
    }
    LOG("Consumer exiting");
}
// Producer worker: repeatedly appends one random item to the shared ring
// buffer, blocking while the buffer is full. Exits as soon as StopRequested
// is observed (pending capacity is irrelevant to the producer).
void ProducerThread()
{
    LOG("Starting producer thread");
    for(;;) {
        // Simulate work before producing; done outside the lock on purpose.
        Sleep(Random(PRODUCER_SLEEP_TIME_MS));
        {
            // RAII lock over all queue-state reads/writes in this iteration.
            Mutex::Lock __(ItemBufferLock);

            // Wait in a loop: re-check the predicate after every wakeup to
            // tolerate spurious returns from Wait().
            while(QueueSize == BUFFER_SIZE && !StopRequested)
                ItemBufferNotFull.Wait(ItemBufferLock);

            if(StopRequested)
                break;  // lock released by RAII destructor on scope exit

            // Append at the logical tail of the circular buffer.
            int Item = Random(1000);
            ItemBuffer[(QueueStartOffset + QueueSize) % BUFFER_SIZE] = Item;
            QueueSize++;
            TotalItemsProduced++;

            LOG("Producer item " << Item << ", queue size " << QueueSize);
        }
        // Signal after releasing the lock so a woken consumer can acquire
        // it immediately.
        ItemBufferNotEmpty.Signal();
    }
    LOG("Producer exiting");
}
void Algorithm::RunParallel(set<Algorithm*> algos, Graph& G, vector<string> parameters, float MaxApproximationDistance, float MinCorrectnessProbability) { set<Algorithm*> SelectedAlgorithms; for(set<Algorithm*>::iterator i = algos.begin(); i != algos.end(); i++) if((*i)->SuitableFor(G) && (*i)->CanGuaranteeApproximationDistance(G, MaxApproximationDistance) && (*i)->CanGuaranteeCorrectnessProbability(G, MinCorrectnessProbability)) SelectedAlgorithms.insert(*i); if(SelectedAlgorithms.size() == 0) { throw "No suitable algorithm found"; } else if(SelectedAlgorithms.size() == 1) // we have 1 algorithm => no multithreading needed { Algorithm* algo = *SelectedAlgorithms.begin(); algo->Run(G, parameters); } else { // we have more than 1 algorithm => run them in parallel // give each algorithm its own copy of G map<Thread*, Graph*> GraphCopies; for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++) GraphCopies[*i] = new Graph(G); ConditionVariable synchronize; Thread* finishedAlgorithm = NULL; synchronize.Lock(); cerr << "starting " << SelectedAlgorithms.size() << " of " << algos.size() << " algorithms\n"; for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++) (*i)->RunInThread(GraphCopies[*i], parameters, &synchronize, &finishedAlgorithm); while(finishedAlgorithm == NULL) // a mislead interrupt can cause the Wait to stop, therefore synchronize.Wait(); // this has to be in a loop that checks whether someone has actually finished G = *(GraphCopies[finishedAlgorithm]); cerr << "someone finished. 
sending termination requests\n"; for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++) (*i)->Terminate(); synchronize.Unlock(); cerr << "waiting for threads to join\n"; for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++) { (*i)->Join(); delete GraphCopies[*i]; } GraphCopies.clear(); cerr << "everyone joined\n"; } }
// Device-control (ioctl) entry point for the ACPI battery driver.
// Dispatches on the opcode, copying results to the user buffer via
// user_memcpy; unknown opcodes yield B_DEV_INVALID_IOCTL.
static status_t
acpi_battery_control(void* _cookie, uint32 op, void* arg, size_t len)
{
	battery_device_cookie* device = (battery_device_cookie*)_cookie;

	switch (op) {
		case IDENTIFY_DEVICE:
		{
			// Hand back the magic id so callers can verify the device type.
			if (len < sizeof(uint32))
				return B_BAD_VALUE;

			uint32 magic = kMagicACPIBatteryID;
			return user_memcpy(arg, &magic, sizeof(magic));
		}

		case GET_BATTERY_INFO:
		{
			if (len < sizeof(acpi_battery_info))
				return B_BAD_VALUE;

			acpi_battery_info info;
			status_t status = ReadBatteryStatus(device->driver_cookie, &info);
			if (status != B_OK)
				return status;
			return user_memcpy(arg, &info, sizeof(info));
		}

		case GET_EXTENDED_BATTERY_INFO:
		{
			if (len < sizeof(acpi_extended_battery_info))
				return B_BAD_VALUE;

			acpi_extended_battery_info extendedInfo;
			status_t status = ReadBatteryInfo(device->driver_cookie, &extendedInfo);
			if (status != B_OK)
				return status;
			return user_memcpy(arg, &extendedInfo, sizeof(extendedInfo));
		}

		case WATCH_BATTERY:
			// Block until the battery state changes; a concurrent
			// STOP_WATCHING_BATTERY aborts the wait with B_ERROR.
			sBatteryCondition.Wait();
			if (atomic_get(&(device->stop_watching))) {
				atomic_set(&(device->stop_watching), 0);
				return B_ERROR;
			}
			return B_OK;

		case STOP_WATCHING_BATTERY:
			// Flag the watcher, then wake everyone blocked in WATCH_BATTERY.
			atomic_set(&(device->stop_watching), 1);
			sBatteryCondition.NotifyAll();
			return B_OK;
	}

	return B_DEV_INVALID_IOCTL;
}
/*
 * Check that a condition variable works.
 * Fix: the predicate must be re-checked in a loop around Wait() — condition
 * variable waits can return spuriously, and a single 'if' would fall through
 * to the assertion before the worker thread has set i to EXPECTED.
 */
void ThreadTest::testConditionVariable() {
  Mutex mutex;
  ConditionVariable condition;
  MockConditionThread thread(&mutex, &condition);
  thread.Start();

  mutex.Lock();
  // Loop, not 'if': guards against spurious wakeups and a signal that
  // arrives before the predicate is actually true.
  while (thread.i != MockConditionThread::EXPECTED) {
    condition.Wait(&mutex);
  }
  OLA_ASSERT_EQ(10, thread.i);
  mutex.Unlock();
  thread.Join();
}
/*! Notifies the low resource manager that a resource is lacking. If \a flags
	and \a timeout specify a timeout, the function will wait until the low
	resource manager has finished its next iteration of calling low resource
	handlers, or until the timeout occurs (whichever happens first).
*/
void
low_resource(uint32 resource, uint64 requirements, uint32 flags, uint32 timeout)
{
	// TODO: take requirements into account

	// The switch currently only validates/acknowledges known resource kinds;
	// all cases fall through to the same wake-up below.
	switch (resource) {
		case B_KERNEL_RESOURCE_PAGES:
		case B_KERNEL_RESOURCE_MEMORY:
		case B_KERNEL_RESOURCE_SEMAPHORES:
		case B_KERNEL_RESOURCE_ADDRESS_SPACE:
			break;
	}

	// Wake the low resource manager thread so it runs its handlers.
	release_sem(sLowResourceWaitSem);

	// Wait only when the caller asked for it: either a non-relative timeout
	// mode, or a relative timeout > 0 (a relative timeout of 0 means "do not
	// block"). NOTE(review): exact blocking semantics depend on
	// sLowResourceWaiterCondition.Wait(flags, timeout) — confirm there.
	if ((flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0)
		sLowResourceWaiterCondition.Wait(flags, timeout);
}
// Worker-thread entry point for the task system. The enclosing #if (above
// this chunk — presumably a Windows-vs-pthreads platform switch; confirm in
// the full file) selects between the two signatures below.
static DWORD WINAPI taskEntry(LPVOID arg) {
#else
static void *taskEntry(void *arg) {
#endif
    // Each wait on workerSemaphore corresponds to one queued task (or to a
    // shutdown release); the thread exits when it wakes to an empty queue.
    while (true) {
        workerSemaphore.Wait();
        // Try to get task from task queue
        Task *myTask = NULL;
        {
            MutexLock lock(*taskQueueMutex);
            if (taskQueue.size() == 0)
                break;  // woken with nothing to do => shut down this worker
            // LIFO pickup: take the most recently queued task.
            myTask = taskQueue.back();
            taskQueue.pop_back();
        }
        // Do work for _myTask_ (outside the queue lock).
        PBRT_STARTED_TASK(myTask);
        myTask->Run();
        PBRT_FINISHED_TASK(myTask);
        // Decrement the outstanding-task count under the condition's lock and
        // signal WaitForAllTasks() when the last task completes.
        tasksRunningCondition.Lock();
        int unfinished = --numUnfinishedTasks;
        if (unfinished == 0)
            tasksRunningCondition.Signal();
        tasksRunningCondition.Unlock();
    }
    // Cleanup from task thread and exit
#ifdef PBRT_HAS_PTHREADS
    pthread_exit(NULL);
#endif // PBRT_HAS_PTHREADS
    return 0;
}
#endif // !PBRT_USE_GRAND_CENTRAL_DISPATCH

// Blocks the caller until every enqueued task has finished running.
void WaitForAllTasks() {
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    dispatch_group_wait(gcdGroup, DISPATCH_TIME_FOREVER);
#else
    tasksRunningCondition.Lock();
    // Loop guards against spurious wakeups from Wait().
    while (numUnfinishedTasks > 0)
        tasksRunningCondition.Wait();
    tasksRunningCondition.Unlock();
#endif
}
// Blocks the calling thread on the condition variable registered under
// \a cvid. Improvement: reuse the iterator from find() instead of a second
// cvmap[cvid] lookup (operator[] would also default-insert a NULL entry if
// the key were ever absent).
//
// @param cvid  id of a condition variable previously registered in cvmap.
// @return the result of ConditionVariable::Wait(), or -1 on a bad cvid.
int
WinMonitorData::WaitOnCondition( unsigned int cvid )
{
    int ret = -1;

    // The monitor's mutex must already exist before anyone can wait.
    assert( hMutex != NULL );

    ConditionVariableMap::iterator iter = cvmap.find( cvid );
    if( iter != cvmap.end() )
    {
        ConditionVariable* cv = iter->second;  // single lookup, no re-hash
        assert( cv != NULL );
        ret = cv->Wait();
    }
    else
    {
        // bad cvid
        // TODO how to indicate the error?
        assert(0);
    }
    return ret;
}
/**
 * Block until the OLA Server is running.
 * Fix: wait in a 'while' loop, not a single 'if' — condition variable waits
 * can return spuriously, and with an 'if' a spurious wakeup would let this
 * return before m_is_running is actually true.
 */
void OlaServerThread::WaitForStart() {
  m_mutex.Lock();
  // Re-check the predicate after every wakeup.
  while (!m_is_running)
    m_condition.Wait(&m_mutex);
  m_mutex.Unlock();
}