/* Worker body for the lock test: takes the lock under test, bumps the
 * shared counter, and round-trips the plain APR mutex to exercise it.
 *
 * BUG FIX: the mutex lock/unlock calls were previously written *inside*
 * assert(...). With NDEBUG defined, assert expands to nothing, so the
 * mutex was never locked or unlocked in release builds. The calls are
 * now unconditional; only the status check lives in the assert. */
static void * APR_THREAD_FUNC eachThread(apr_thread_t *id, void *p)
{
    test_mode_e test_mode = (test_mode_e)p;
    apr_status_t rv;

    lock_grab(test_mode);
    ++counter;

    rv = apr_thread_mutex_lock(thread_mutex);
    assert(rv == APR_SUCCESS);
    rv = apr_thread_mutex_unlock(thread_mutex);
    assert(rv == APR_SUCCESS);
    (void)rv; /* silence unused-variable warning under NDEBUG */

    lock_release(test_mode);
    apr_thread_exit(id, 0);
    return NULL;
}
/// MovePicker::get_next_move(Lock&) is the thread-safe overload used when
/// several threads pick moves from the same MovePicker. The whole pick is
/// done under the given lock; once the unlocked overload reports MOVE_NONE
/// the 'finished' flag latches so later callers bail out immediately.
Move MovePicker::get_next_move(Lock &lock) {

  lock_grab(&lock);

  // Another thread already drained the move list: nothing left to hand out.
  if (finished)
  {
      lock_release(&lock);
      return MOVE_NONE;
  }

  Move move = this->get_next_move();

  // Latch the end-of-list condition for subsequent callers.
  if (move == MOVE_NONE)
      finished = true;

  lock_release(&lock);
  return move;
}
// Thread::wake_up() signals sleepCond so a thread blocked on it in its
// idle loop resumes. The signal is issued while holding sleepLock so the
// wake-up cannot be lost to a race with the thread going to sleep
// (presumably the sleeper waits on sleepCond under the same lock —
// NOTE(review): the sleeping side is not visible in this chunk).
void Thread::wake_up() { lock_grab(&sleepLock); cond_signal(&sleepCond); lock_release(&sleepLock); }
// ThreadsManager::split() distributes the search work at a node (a "split
// point") among available slave threads. Under mpLock it claims a SplitPoint
// slot, copies the search state into it, and books every idle slave; it then
// releases the lock, wakes the slaves, and the master itself enters
// idle_loop(), returning only when all threads have finished working at this
// split point. On return *alpha and *bestValue hold the combined results and
// the node counters are folded back into 'pos'.
//
// NOTE(review): 'Fake', 'maxThreadsPerSplitPoint' and 'useSleepingThreads'
// are defined outside this chunk — 'Fake' looks like an enclosing template
// parameter that, when true, performs the split bookkeeping without booking
// any slaves. TODO confirm against the full file.
void ThreadsManager::split(Position& pos, SearchStack* ss, Value* alpha, const Value beta,
                           Value* bestValue, Depth depth, Move threatMove, int moveCount,
                           MovePicker* mp, bool pvNode) {
  assert(pos.is_ok());
  assert(*bestValue >= -VALUE_INFINITE);
  assert(*bestValue <= *alpha);
  assert(*alpha < beta);
  assert(beta <= VALUE_INFINITE);
  assert(depth > DEPTH_ZERO);
  assert(pos.thread() >= 0 && pos.thread() < activeThreads);
  assert(activeThreads > 1);

  int i, master = pos.thread();
  Thread& masterThread = threads[master];

  lock_grab(&mpLock);

  // If no other thread is available to help us, or if we have too many
  // active split points, don't split.
  if (   !available_slave_exists(master)
      || masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
  {
      lock_release(&mpLock);
      return;
  }

  // Pick the next available split point object from the split point stack
  SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints++];

  // Initialize the split point object
  splitPoint.parent = masterThread.splitPoint;
  splitPoint.master = master;
  splitPoint.is_betaCutoff = false;
  splitPoint.depth = depth;
  splitPoint.threatMove = threatMove;
  splitPoint.alpha = *alpha;
  splitPoint.beta = beta;
  splitPoint.pvNode = pvNode;
  splitPoint.bestValue = *bestValue;
  splitPoint.mp = mp;
  splitPoint.moveCount = moveCount;
  splitPoint.pos = &pos;
  splitPoint.nodes = 0;
  splitPoint.ss = ss;
  for (i = 0; i < activeThreads; i++)
      splitPoint.is_slave[i] = false;

  masterThread.splitPoint = &splitPoint;

  // If we are here it means we are not available
  assert(masterThread.state != Thread::AVAILABLE);

  int workersCnt = 1; // At least the master is included

  // Allocate available threads setting state to THREAD_BOOKED
  for (i = 0; !Fake && i < activeThreads && workersCnt < maxThreadsPerSplitPoint; i++)
      if (i != master && threads[i].is_available_to(master))
      {
          threads[i].state = Thread::BOOKED;
          threads[i].splitPoint = &splitPoint;
          splitPoint.is_slave[i] = true;
          workersCnt++;
      }

  assert(Fake || workersCnt > 1);

  // We can release the lock because slave threads are already booked and
  // master is not available: no other thread can claim them or this slot.
  lock_release(&mpLock);

  // Tell the threads that they have work to do. This will make them leave
  // their idle loop.
  for (i = 0; i < activeThreads; i++)
      if (i == master || splitPoint.is_slave[i])
      {
          assert(i == master || threads[i].state == Thread::BOOKED);

          threads[i].state = Thread::WORKISWAITING; // This makes the slave to exit from idle_loop()

          if (useSleepingThreads && i != master)
              threads[i].wake_up();
      }

  // Everything is set up. The master thread enters the idle loop, from
  // which it will instantly launch a search, because its state is
  // THREAD_WORKISWAITING. We send the split point as a second parameter to the
  // idle loop, which means that the main thread will return from the idle
  // loop when all threads have finished their work at this split point.
  idle_loop(master, &splitPoint);

  // We have returned from the idle loop, which means that all threads are
  // finished. Update alpha and bestValue, and return.
  lock_grab(&mpLock);

  *alpha = splitPoint.alpha;
  *bestValue = splitPoint.bestValue;
  masterThread.activeSplitPoints--;
  masterThread.splitPoint = splitPoint.parent;
  pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);

  lock_release(&mpLock);
}