int CommandHandler(XPLMCommandRef inCommand, XPLMCommandPhase inPhase, void* inRefcon)
{
    // if (!gPluginEnabled.load()) {
    //     return IGNORED_EVENT;
    // }
    switch (reinterpret_cast<size_t>(inRefcon)) {
    case CMD_CONTACT_ATC:
        switch (inPhase) {
        case xplm_CommandBegin:
        case xplm_CommandContinue:
            gPTT_On.store(true);
            break;
        case xplm_CommandEnd:
            gPTT_On.store(false);
            break;
        default:
            break;
        }
        break;
    default:
        break;
    }
    return IGNORED_EVENT;
}
// Schedules `count` worker threads for termination (shrinks the pool)
static int ccr_dec_workers(lua_State *L) {
    int count = (int)luaL_checkinteger(L, 1);
    lua_getfield(L, LUA_REGISTRYINDEX, CCR_SELF);
    process_t *proc = (process_t*)lua_touserdata(L, -1);
    // checks if the calling process is a main process
    if (proc->main) {
        if (thr_pool.size() - count < THR_SIZE) {
            lua_pushinteger(L, 0);
            lua_pushstring(L, "thread pool is already at the minimum size");
            return 2;
        }
        // sets the number of threads to kill
        free_workers.fetch_and_add(count);
        // raises the flag indicating that threads should be killed
        free_flag.compare_and_swap(true, false);
        // returns the expected number of threads remaining in the pool
        lua_pushinteger(L, (lua_Integer) thr_pool.size() - count);
        return 1;
    }
    lua_pushinteger(L, 0);
    lua_pushstring(L, "only a main process can free threads");
    return 2;
}
void statsThread(atomic<bool>& failed)
{
    resetThreadAllocInfo();

    for (uint32_t i = 1; i <= 1000; ++i) {
        void* mem = malloc(500);
        free(mem);
        ros::WallDuration(0.001).sleep();

        AllocInfo info = getThreadAllocInfo();
        if (info.mallocs != i) {
            ROS_ERROR_STREAM("mallocs is " << info.mallocs << " should be " << i);
            failed.store(true);
            return;
        }
        if (info.frees != i) {
            ROS_ERROR_STREAM("frees is " << info.frees << " should be " << i);
            failed.store(true);
            return;
        }
    }
}
void push(T const& data)
{
    node* const new_node = new node(data);
    new_node->next = head.load();
    // compare_exchange_weak checks whether head still equals new_node->next;
    // if it does, it replaces head with new_node. If it doesn't (head was
    // modified by another thread in the meantime), it refreshes
    // new_node->next with the current head and the loop retries.
    while (!head.compare_exchange_weak(new_node->next, new_node));
}
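// A matching pop() sketch for the stack above (hypothetical, not part of the
// original source; assumes the same atomic `head`, a `node` with `next` and
// `data` members, and <memory> for std::shared_ptr). The CAS retries until
// head still equals old_head, then swings it to old_head->next. This naive
// version never deletes popped nodes, which sidesteps the ABA/reclamation
// problem at the cost of leaking them.
std::shared_ptr<T> pop()
{
    node* old_head = head.load();
    while (old_head &&
           !head.compare_exchange_weak(old_head, old_head->next));
    return old_head ? std::make_shared<T>(old_head->data)
                    : std::shared_ptr<T>();
}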
void atomic2(uint64_t cnt) {
    for (uint64_t i = 0; i < cnt; i++) {
        total2.fetch_add(1);
        total2.fetch_sub(1);
        //total2++;
    }
}
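// A minimal driver sketch for atomic2() above (assumes a namespace-scope
// declaration along the lines of: std::atomic<uint64_t> total2{0};).
// Every fetch_add(1) is paired with a fetch_sub(1), so after both threads
// join the counter is exactly 0; the commented-out total2++ would also be
// safe, since operator++ on std::atomic is itself atomic.
#include <cassert>
#include <thread>

int main()
{
    std::thread a(atomic2, 10000000);
    std::thread b(atomic2, 10000000);
    a.join();
    b.join();
    assert(total2.load() == 0);
}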
void run_test(void)
{
    freelist_type fl(std::allocator<int>(), 8);

    std::set<dummy*> nodes;
    dummy d;

    if (bounded)
        test_running.store(true);

    for (int i = 0; i != 4; ++i) {
        dummy* allocated = fl.template construct<threadsafe, bounded>();
        BOOST_REQUIRE(nodes.find(allocated) == nodes.end());
        nodes.insert(allocated);
    }

    BOOST_FOREACH(dummy* d, nodes)
        fl.template destruct<threadsafe>(d);

    nodes.clear();

    for (int i = 0; i != 4; ++i)
        nodes.insert(fl.template construct<threadsafe, bounded>());

    BOOST_FOREACH(dummy* d, nodes)
        fl.template destruct<threadsafe>(d);

    for (int i = 0; i != 4; ++i)
        nodes.insert(fl.template construct<threadsafe, bounded>());

    if (bounded)
        test_running.store(false);
}
static void* ccr_worker(void *arg)
{
    int r, resume;
    process_t *proc;

    while (1) {
        // Checks whether threads need to be killed and the ready process
        // queue is empty, so that threads are only killed once the queue
        // has drained.
        // if ((free_flag.compare_and_swap(false, true)) && (prc_ready.empty()))
        if (free_flag.compare_and_swap(false, true)) {
            pthread_t thread = pthread_self();
            // removes the reference from the pool
            thr_pool.remove(thread);
            // checks whether there are more threads to kill and, if so,
            // re-raises the flag
            if (free_workers.fetch_and_decrement() > 1) {
                free_flag.compare_and_swap(true, false);
            }
            // kills the current thread
            pthread_exit(NULL);
        }

        prc_ready.pop(proc);
        if (!proc) return NULL;

        r = lua_resume(proc->L, 0);
        switch (r) {
        case LUA_YIELD:
            //cerr << "Yield!\n";
            switch (proc->status) {
            case PS_READY:
                // releasing the lock acquired in ccr_yield
                proc->wlock.release();
                prc_ready.push(proc);
                break;
            case PS_BLOCKING:
                proc->status = PS_BLOCKED;
                // releasing the lock acquired in ccr_yield
                proc->wlock.release();
                break;
            }
            break;
        case LUA_ERRRUN:
        case LUA_ERRMEM:
        case LUA_ERRERR:
            cerr << "[ERROR][PROCESSING A LUA PROCESS] "
                 << lua_tostring(proc->L, -1) << endl;
            // fall-through
        case 0:
            lua_close(proc->L);
            mbx_close(proc->mbox);
            prc_free(proc);
            break;
        }
    }
    return NULL;
}
void release( scope_buffer_pool & pool )
{
    bool allocated = _status.load( boost::memory_order_relaxed ) != free;
    if( !allocated ) return;
    pool.deallocate( _data.get() );
    _status.store( free, boost::memory_order_release );
}
bool await(function<void()> cb = []{})
{
    int my_gen = generation.load();
    if (count.fetch_add(1) == N_THREADS - 1) {
        // last thread to arrive: run the callback, reset the count, and
        // advance the generation so the spinning threads can leave
        if (cb) cb();
        count.store(0);
        generation.fetch_add(1);
        return true;
    } else {
        // spin until the generation changes
        do { } while (my_gen == generation.load());
        return false;
    }
}
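// A hypothetical usage sketch for the barrier above (assumes `generation`
// and `count` are std::atomic<int> initialized to 0 and N_THREADS is the
// number of participating threads). Exactly one arriving thread per round
// returns true and runs the callback; all threads leave the round together.
void worker(std::atomic<int>& rounds_led)
{
    for (int round = 0; round < 100; ++round) {
        if (await([] { /* runs once per round, on the last arriver */ }))
            rounds_led.fetch_add(1);
    }
    // once all N_THREADS workers finish, rounds_led.load() == 100
}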
unsigned int pull()
{
    int stage = _stage.load( std::memory_order_relaxed );
    bool changed = _state[stage].changed.load( std::memory_order_relaxed );

    if( changed ) {
        _state[_out].changed.store( false, std::memory_order_relaxed );
        // swap our output slot with the staging slot and read from it
        _out = _stage.exchange( _out, std::memory_order_acquire );
        return _state[_out].frames;
    }
    return 0;
}
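// A hedged sketch of the producer half implied by pull() above, following
// the usual triple-buffer pattern: the writer owns a third slot index `_in`
// (hypothetical name, not from the original source), fills it, marks it
// changed, then publishes it by swapping it into the staging slot with
// release ordering so the reader's acquire exchange observes the frames.
void push(unsigned int frames)
{
    _state[_in].frames = frames;
    _state[_in].changed.store(true, std::memory_order_relaxed);
    _in = _stage.exchange(_in, std::memory_order_release);
}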
int foo(atomic<int>& x)
{
    for (size_t n = 0; ; ++n) {
        auto expected = x.load();
        auto desired = 0;
        x.compare_exchange_strong(expected, desired);
        if (n == loop) return desired;
    }
}
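// For contrast, a minimal idiomatic CAS loop (a sketch, not from the
// original source): on failure, compare_exchange_strong refreshes
// `expected` with the currently stored value, so the loop retries until
// the swap actually succeeds instead of iterating a fixed number of times.
#include <atomic>

int drain(std::atomic<int>& x)
{
    int expected = x.load();
    while (!x.compare_exchange_strong(expected, 0))
        ; // expected now holds the freshly observed value; retry
    return expected; // the value that was replaced by 0
}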
void operator()( int i ) const
{
    internal::concurrent_monitor::thread_context thr_ctx;
    if( i==0 ) {
        size_t n_expected_sleepers = NTHRS_USED_IN_DESTRUCTOR_TEST-1;
        while( n_sleepers<n_expected_sleepers )
            __TBB_Yield();
        while( n_sleepers.compare_and_swap( VLN+NTHRS_USED_IN_DESTRUCTOR_TEST, n_expected_sleepers )!=n_expected_sleepers )
            __TBB_Yield();
        for( int j=0; j<100; ++j )
            Harness::Sleep( 1 );
        delete mon;
        mon = NULL;
    } else {
        mon->prepare_wait( thr_ctx, uintptr_t(this) );
        while( n_sleepers<VLN ) {
            try {
                ++n_sleepers;
                mon->commit_wait( thr_ctx );
                if( --n_sleepers>VLN )
                    break;
            } catch( tbb::user_abort& ) {
                // can no longer access 'mon'
                break;
            }
            mon->prepare_wait( thr_ctx, uintptr_t(this) );
        }
    }
}
void unlock()
{
    tid zero(0);
    tid thread_id = this_thread::get_id();
    // only the thread that holds the lock may release it
    if (!block.compare_exchange_strong(thread_id, zero, memory_order_release)) {
        throw logic_error("unlock() called by a thread that does not own the lock");
    }
}
void numberOfFaces(string filePath)
{
    if (!instance.get()) {
        CascadeClassifier* face_cascade = new CascadeClassifier();
        face_cascade->load(face_cascade_name);
        instance.reset(face_cascade);
    }

    Mat faceImage = imread(filePath, IMREAD_COLOR);
    if (faceImage.empty()) // Check for invalid input
    {
        cout << "Could not open or find the image" << endl;
        return;
    }

    Mat frame_gray;
    std::vector<Rect> faces;

    cvtColor(faceImage, frame_gray, CV_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    instance->detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

    int numFaces = static_cast<int>(faces.size());
    //cout << "Found " << numFaces << " faces in image: " << filePath << endl;
    totalFaces.fetch_add(numFaces, boost::memory_order_seq_cst);
    //cout << "Total: " << totalFaces << endl;

    /*if (numFaces > 0) {
        drawFaceElipse(faceImage, faces);
    }*/
}
bool server::bind(const std::string& ipcfile, size_t buffer_size) {
  logstream(LOG_INFO) << "Server attaching to " << ipcfile << " " << buffer_size << std::endl;
  size_t _run_progress = 0;
  try {
    m_shmname = ipcfile;
    if (m_shmname.empty()) {
      std::stringstream strm;
      strm << get_my_pid() << "_" << SERVER_IPC_COUNTER.inc();
      m_shmname = strm.str();
    }
    // sets up the RAII deleter so that we eventually delete this file
    _run_progress = 1;
    m_ipcfile_deleter = register_shared_memory_name(m_shmname);

    // adapted from the code in
    // http://www.boost.org/doc/libs/1_58_0/doc/html/interprocess/synchronization_mechanisms.html
    _run_progress = 2;
    m_shared_object.reset(new shared_memory_object(create_only, m_shmname.c_str(), read_write));

    // set the size
    _run_progress = 3;
    m_shared_object->truncate(buffer_size + sizeof(shared_memory_buffer));

    // map the whole shared memory into this process
    _run_progress = 4;
    m_mapped_region.reset(new mapped_region(*m_shared_object, read_write));
    _run_progress = 5;
    void* buffer = m_mapped_region->get_address();

    // placement new: construct the data in the buffer
    _run_progress = 6;
    m_buffer = new (buffer) shared_memory_buffer;
    m_buffer->m_buffer_size = buffer_size;
    m_buffer->m_server_to_client.sender_pid = get_my_pid();
  } catch (const std::string& error) {
    logstream(LOG_ERROR) << "SHMIPC initialization error (1), stage "
                         << _run_progress << ": " << error << std::endl;
    return false;
  } catch (const std::exception& error) {
    logstream(LOG_ERROR) << "SHMIPC initialization error (2), stage "
                         << _run_progress << ": " << error.what() << std::endl;
    return false;
  } catch (...) {
    logstream(LOG_ERROR) << "Unknown SHMIPC initialization error, stage "
                         << _run_progress << "." << std::endl;
    return false;
  }
  return true;
}
// Perform an atomic bitwise-AND on the operand, and return its previous value.
inline uintptr_t fetch_and_and(atomic<uintptr_t>& operand, uintptr_t value)
{
    for (tbb::internal::atomic_backoff b;;b.pause()) {
        uintptr_t old = operand;
        uintptr_t result = operand.compare_and_swap(old&value, old);
        if (result==old) return result;
    }
}
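// A matching bitwise-OR sketch built on the same CAS-plus-backoff retry
// pattern (a hypothetical companion helper, not part of TBB's public API):
// read the current value, attempt to swap in old|value, and retry with
// backoff until the CAS observes the value it read.
inline uintptr_t fetch_and_or(atomic<uintptr_t>& operand, uintptr_t value)
{
    for (tbb::internal::atomic_backoff b;;b.pause()) {
        uintptr_t old = operand;
        uintptr_t result = operand.compare_and_swap(old|value, old);
        if (result==old) return result;
    }
}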
int flushInBackground()
{
    vector<std::pair<unsigned int, unsigned int>>& vec = *consumerVec.load();
    if (vec.size() > 0) {
        std::stable_sort(vec.begin(), vec.end(), postingComp());
        unsigned int wordid = vec[0].second;
        shared_ptr<Set> docSet = getOrCreate(wordid);
        unsigned int last = vec[0].first;
        // postings are (docid, wordid) pairs, sorted by word and then doc
        for (auto posting : vec) {
            if (posting.second != wordid) {
                // word changed: flush the finished doc set and start the next
                batchPut(wordid, docSet);
                docSet = getOrCreate(posting.second);
                wordid = posting.second;
                last = posting.first;
            }
            docSet->addDoc(posting.first);
            assert(posting.first >= last);
            last = posting.first;
        }
        batchPut(wordid, docSet);
        vec.clear();
    }
    store->Write(batch);
    batch.Clear();
    return 1;
}
void IncrementSharedValue10000000Times(RandomDelay& randomDelay)
{
    int count = 0;
    while (count < 10000000) {
        randomDelay.doBusyWork();
        int expected = 0;
        if (flag.compare_exchange_strong(expected, 1, memory_order_relaxed)) {
            // Lock was successful
            sharedValue++;
            flag.store(0, memory_order_relaxed);
            count++;
        }
    }
}
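// A hedged variant of the same loop: acquiring the flag with acquire
// ordering and releasing it with release ordering keeps sharedValue++
// inside the critical section, so no increments can be lost even on
// weakly-ordered CPUs. The relaxed version above permits the increment to
// be reordered past the flag operations, which stronger hardware ordering
// may happen to mask but the memory model does not guarantee.
void IncrementWithAcquireRelease(RandomDelay& randomDelay)
{
    int count = 0;
    while (count < 10000000) {
        randomDelay.doBusyWork();
        int expected = 0;
        if (flag.compare_exchange_strong(expected, 1, memory_order_acquire)) {
            sharedValue++;
            flag.store(0, memory_order_release);
            count++;
        }
    }
}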
void private_worker::run()
{
    if( my_state.compare_and_swap( st_normal, st_init )==st_init ) {
        ::rml::job& j = *my_client.create_one_job();
        --my_server.my_slack;
        while( my_state==st_normal ) {
            if( my_server.my_slack>=0 ) {
                my_client.process(j);
            } else {
                thread_monitor::cookie c;
                // Prepare to wait
                my_thread_monitor.prepare_wait(c);
                // Check/set the invariant for sleeping
                if( my_state==st_normal && my_server.try_insert_in_asleep_list(*this) ) {
                    my_thread_monitor.commit_wait(c);
                    // Propagate chain reaction
                    if( my_server.has_sleepers() )
                        my_server.wake_some(0);
                } else {
                    // Invariant broken
                    my_thread_monitor.cancel_wait();
                }
            }
        }
        my_client.cleanup(j);
        ++my_server.my_slack;
    }
    my_server.remove_server_ref();
}
void private_server::wake_some( int additional_slack )
{
    __TBB_ASSERT( additional_slack>=0, NULL );
    private_worker* wakee[2];
    private_worker** w = wakee;
    {
        tbb::spin_mutex::scoped_lock lock(my_asleep_list_mutex);
        while( my_asleep_list_root && w<wakee+2 ) {
            if( additional_slack>0 ) {
                --additional_slack;
            } else {
                // Try to claim unit of slack
                int old;
                do {
                    old = my_slack;
                    if( old<=0 ) goto done;
                } while( my_slack.compare_and_swap(old-1,old)!=old );
            }
            // Pop sleeping worker to combine with claimed unit of slack
            my_asleep_list_root = (*w++ = my_asleep_list_root)->my_next;
        }
        if( additional_slack ) {
            // Contribute our unused slack to my_slack.
            my_slack += additional_slack;
        }
    }
done:
    while( w>wakee )
        (*--w)->my_thread_monitor.notify();
}
void PstnThread::onJoinSuccess(const char *cname, unsigned uid, const char *msg)
{
    (void)cname;
    (void)uid;
    joined_flag_.store(true);
    LOG(INFO, "Joined the channel %s: %s", cname, msg);
}
namespace microthread {
    static atomic<unsigned int> _id{0};
    static unordered_map<unsigned int, unique_ptr<task>> tasks;

    task::task(unsigned int _id, const function<void()>& f)
        : id(_id),
          sig(new(nothrow) flowcontrol::signal()),
          coro(new(nothrow) coroutine(*this, f)),
          handle(id, coro, sig) {
    }

    handle& create(const function<void()>& f) {
        auto id = _id.fetch_add(1);
        auto t = make_unique<task>(id, f);
        auto& slot = tasks[id];
        slot = std::move(t);
        return slot->handle;
    }
};
void run(queue& stk)
{
    BOOST_WARN(stk.is_lock_free());

    running.store(true);

    thread_group writer;
    thread_group reader;

    BOOST_REQUIRE(stk.empty());

    for (int i = 0; i != reader_threads; ++i)
        reader.create_thread(boost::bind(&queue_stress_tester::template get_items<queue>,
                                         this, boost::ref(stk)));

    for (int i = 0; i != writer_threads; ++i)
        writer.create_thread(boost::bind(&queue_stress_tester::template add_items<queue>,
                                         this, boost::ref(stk)));

    using namespace std;
    cout << "threads created" << endl;

    writer.join_all();
    cout << "writer threads joined, waiting for readers" << endl;

    running = false;
    reader.join_all();
    cout << "reader threads joined" << endl;

    BOOST_REQUIRE_EQUAL(data.count_nodes(), 0);
    BOOST_REQUIRE(stk.empty());

    BOOST_REQUIRE_EQUAL(push_count, pop_count);
    BOOST_REQUIRE_EQUAL(push_count, writer_threads * node_count);
}
inline void readlock(request *I)
{
    I->lockclass = QUEUED_RW_LOCK_REQUEST_READ;
    I->next = NULL;
    I->s.stateu = 0;
    I->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
    I->s.state.blocked = true;
    __sync_synchronize();
    request* predecessor = __sync_lock_test_and_set(&tail, I);

    if (predecessor == NULL) {
        reader_count.inc();
        I->s.state.blocked = false;
    } else {
        state_union tempold, tempnew;
        tempold.state.blocked = true;
        tempold.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
        tempnew.state.blocked = true;
        tempnew.state.successor_class = QUEUED_RW_LOCK_REQUEST_READ;
        __sync_synchronize();
        if (predecessor->lockclass == QUEUED_RW_LOCK_REQUEST_WRITE ||
            atomic_compare_and_swap(predecessor->s.stateu, tempold.stateu, tempnew.stateu)) {
            predecessor->next = I;
            // wait until the predecessor unblocks us
            __sync_synchronize();
            volatile state_union& is = I->s;
            while (is.state.blocked) sched_yield();
        } else {
            reader_count.inc();
            predecessor->next = I;
            __sync_synchronize();
            I->s.state.blocked = false;
        }
    }
    __sync_synchronize();
    if (I->s.state.successor_class == QUEUED_RW_LOCK_REQUEST_READ) {
        // wait for the successor to link itself in, then unblock it
        while (I->next == NULL) sched_yield();
        reader_count.inc();
        I->next->s.state.blocked = false;
    }
}
bool try_terminate(size_t cpuid, std::pair<size_t, bool>& job)
{
    job.second = false;
    numactive.dec();
    cons.begin_done_critical_section(cpuid);
    job = queue.try_dequeue();
    if (job.second == false) {
        bool ret = cons.end_done_critical_section(cpuid);
        numactive.inc();
        return ret;
    } else {
        cons.cancel_critical_section(cpuid);
        numactive.inc();
        return false;
    }
}
float FlightLoopCallback(float inElapsedSinceLastCall,
                         float inElapsedTimeSinceLastFlightLoop,
                         int inCounter, void* inRefcon)
{
    if (!gPluginEnabled.load()) {
    }
    return 1.0;
}
void lock()
{
    tid zero(0);
    tid thread_id = this_thread::get_id();
    // compare_exchange_strong overwrites `zero` with the current owner on
    // failure, so it must be reset before retrying
    while (!block.compare_exchange_strong(zero, thread_id, std::memory_order_acquire)) {
        std::this_thread::yield();
        zero = tid(0);
    }
}
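// A hypothetical usage sketch: a class exposing the lock()/unlock() pair
// above satisfies BasicLockable, so it works with std::lock_guard from
// <mutex>. The class name owner_spinlock is assumed here, not taken from
// the original source.
owner_spinlock mtx;
int shared_counter = 0;

void worker()
{
    for (int i = 0; i < 1000; ++i) {
        std::lock_guard<owner_spinlock> guard(mtx); // lock() on entry, unlock() on scope exit
        ++shared_counter;
    }
}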
void unlock(int threadId)
{
    int freeFlag = -1, toReplace = threadId;
    if (!owner.compare_exchange_strong(threadId, freeFlag)) {
        // on failure, compare_exchange_strong wrote the actual owner into threadId
        cerr << "Current owner " << threadId << " but "
             << "Thread number " + to_string(toReplace) << " is not the owner!" << endl;
    }
}
void populate_queue()
{
    unsigned const number_of_items = 20;
    queue_data.clear();
    for (unsigned i = 0; i < number_of_items; ++i) {
        queue_data.push_back(i);
    }
    // the release store publishes the queue contents to any thread that
    // reads `count` with acquire semantics
    count.store(number_of_items, memory_order_release);
}
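// The matching consumer side (a sketch: wait_for_more_items() and
// process() are hypothetical helpers, and count is assumed to be a signed
// atomic). fetch_sub with acquire ordering pairs with the release store
// above, so a thread that claims index i is guaranteed to see the element
// written at queue_data[i-1].
void consume_queue_items()
{
    while (true) {
        int item_index;
        if ((item_index = count.fetch_sub(1, memory_order_acquire)) <= 0) {
            wait_for_more_items(); // nothing left to claim; back off and retry
            continue;
        }
        process(queue_data[item_index - 1]); // safely published by the release store
    }
}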
EXTERN_FORM DLL_API Dvoid __cdecl coold_LoadMeshFromFile(const Dchar* filename)
{
    GETSINGLE(MeshManager).Clear();
    GETSINGLE(MeshManager).LoadMesh(filename);

    // Additional logic for thread testing ----------------------------------
    for (Dint i = 0; i < g_sGridCount.load() * g_sGridCount.load() - 1; ++i) {
        CustomMesh* pMesh = GETSINGLE(MeshManager).GetMesh(filename);
        if (CustomMeshPLY* pPlyMesh = dynamic_cast<CustomMeshPLY*>(pMesh)) {
            CustomMesh* pNewMesh = new CustomMeshPLY(*pPlyMesh);
            GETSINGLE(MeshManager).AddMesh(filename + to_string(i), pNewMesh);
        }
    }
    //------------------------------------------------------------------------
}