int SysQNode::Send (void *pData, int Len)
{
  assert (m_Sanity == 0xEDFEEFBE) ;
  if (m_Sanity == 0xEDFEEFBE)
  {
    DataNode *dNode = (DataNode *) POVMS_Sys_Malloc (sizeof (DataNode)) ;
    if (dNode == NULL)
      return (-3) ;

    dNode->Data = pData ;
    dNode->Len = Len ;
    dNode->Next = NULL ;

    boost::mutex::scoped_lock lock (m_EventMutex) ;

    if (m_Last != NULL)
      m_Last->Next = dNode ;
    if (m_First == NULL)
      m_First = dNode ;
    m_Last = dNode ;
    m_Count++ ;
  }
  else
    return (-2) ;
  m_Event.notify_one ();
  return (0) ;
}
void stop_service()
{
    {
        boost::lock_guard<boost::mutex> lock(stop_mutex_);
        is_running_ = false;
    }
    shutdown_condition_.notify_one();
}
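/*
 * Hypothetical counterpart to stop_service() above, sketching the waiting side
 * of the shutdown handshake. The member names (stop_mutex_, is_running_,
 * shutdown_condition_) come from the snippet; wait_for_shutdown() itself is an
 * assumed illustration of where the notify_one() is consumed.
 */
void wait_for_shutdown()
{
    boost::unique_lock<boost::mutex> lock(stop_mutex_);
    while (is_running_)
        shutdown_condition_.wait(lock);   // woken by stop_service()'s notify_one()
}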
void output_(FILE* f)
{
    IO_TYPE ioStream(f, "w");
    uint64_t count = 0;
    uint64_t nextStart = 0;

    while (count < count_)
    {
        boost::mutex::scoped_lock lock(out_buf_mtx_);
        // Wait until the sorting thread has filled the output buffer.
        while (out_buf_size_ == 0)
            out_buf_con_.wait(lock);

        // Write the run header: buffer size, record count, and a placeholder
        // for the file offset of the next run.
        IASSERT(fwrite(&out_buf_size_, sizeof(uint32_t), 1, f) == 1);
        IASSERT(fwrite(&out_buf_num_, sizeof(uint32_t), 1, f) == 1);
        //IASSERT(fwrite(&max_record_len_of_this_run_, sizeof(uint32_t), 1, f)==1); //TODO

        uint64_t nextStartPos = ftell(f);
        IASSERT(fwrite(&nextStart, sizeof(uint64_t), 1, f) == 1);

        //IASSERT(fwrite(out_buf_, out_buf_size_, 1, f)==1);
        ioStream.write(out_buf_, out_buf_size_);

        // Go back and patch the placeholder with the real offset of the next run.
        nextStart = ftell(f);
        fseek(f, nextStartPos, SEEK_SET);
        IASSERT(fwrite(&nextStart, sizeof(uint64_t), 1, f) == 1);
        fseek(f, nextStart, SEEK_SET);

        IASSERT(t_check_sort_());

        count += out_buf_num_;
        out_buf_size_ = out_buf_num_ = 0;
        // Signal the sorting thread that the output buffer is free again.
        out_buf_con_.notify_one();
    }
    std::cout << "Outputting is over...\n";
}
void push(const T& t)
{
    boost::mutex::scoped_lock l(q_mutex);
    q_.push(t);
    {
        boost::mutex::scoped_lock lock(data_available_mtx);
        data_available = q_.size() > 0;
    }
    data_available_cond.notify_one();
}
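/*
 * Hypothetical consumer-side wait paired with push() above; a minimal sketch,
 * assuming a waiter that blocks on data_available_cond until push() sets
 * data_available. Only the member names come from the snippet; wait_for_data()
 * itself is an illustration, not code from the original class.
 */
void wait_for_data()
{
    boost::mutex::scoped_lock lock(data_available_mtx);
    while (!data_available)
        data_available_cond.wait(lock);   // woken by push()'s notify_one()
}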
/**
 * Stops the thread; returns immediately if it is already stopped.
 */
void stop()
{
    {
        boost::mutex::scoped_lock lock(m_run_mutex);
        if (!m_run)
            return;
        m_run = false;
        // Notify the asynchronous monitor thread that it should wake up
        // if it's currently waiting on m_run_cond
        m_run_cond.notify_one();
    }
    // can only join once we've released the mutex, so run() loop can finish up
    m_pthread->join();
}
void work()
{
    try {
        m_rou(pstade::perfect<void>(
            boost::lambda::bind(&self_t::yield, this, boost::lambda::_1)
        ));
    }
    catch (exit_exception const&) {
    }

    boost::mutex::scoped_lock lock(m_mutex);
    m_status.set(is_end::value);
    m_cond.notify_one();
}
void work()
{
    try {
        m_block(
            egg::ret<void>(boost::lambda::bind(&self_t::yield, this, boost::lambda::_1))
        );
    }
    catch (yield_break_exception const&) {
    }

    boost::mutex::scoped_lock lock(m_mutex);
#if !defined(NDEBUG)
    m_presult = PSTADE_NULLPTR;
#endif
    m_status.set(block_end::value);
    m_cond.notify_one();
}
void yield(result_ref_t result) // in the same thread as 'work()'.
{
    boost::mutex::scoped_lock lock(m_mutex);

    // 'result' is alive until next increment,
    // as far as 'value' can go across thread-boundary.
    m_presult = boost::addressof(result);

    m_status.reset(block_incrementing::value);
    m_cond.notify_one();

    while (!m_status.test(block_incrementing::value) &&
           !m_status.test(block_interrupted::value))
        m_cond.wait(lock);

    if (m_status.test(block_interrupted::value))
        throw yield_break_exception();
}
bool dequeue( msg_type & msg, boost::xtime const& xt)
{
    typename boost::mutex::scoped_lock lock( mtx_);
    if ( active_ == false && empty_() )
        return false;

    not_empty_cond_.timed_wait(
        lock, xt,
        boost::bind( & Queue< T, Q >::consumers_activate_, this) );

    if ( empty_() )
        msg.reset();
    else
        dequeue_( msg);

    if ( active_ == true && queue_.size() <= low_water_mark_)
        not_full_cond_.notify_one();

    return msg ? true : false;
}
bool dequeue( msg_type & msg)
{
    typename boost::mutex::scoped_lock lock( mtx_);
    if ( active_ == false && empty_() )
        return false;

    while ( empty_() )
    {
        not_empty_cond_.wait(
            lock,
            boost::bind( & Queue< T, Q >::consumers_activate_, this) );
    }

    dequeue_( msg);

    if ( active_ == true && queue_.size() <= low_water_mark_)
    {
        not_full_cond_.notify_one();
    }

    return msg ? true : false;
}
bool enqueue( msg_type const& msg)
{
    typename boost::mutex::scoped_lock lock( mtx_);
    if ( active_ == false)
        return false;

    not_full_cond_.wait(
        lock,
        boost::bind( & Queue< T, Q >::suppliers_activate_, this) );

    if ( active_ != false)
    {
        enqueue_( msg);
        not_empty_cond_.notify_one();
        return true;
    }
    else
        return false;
}
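/*
 * The wait predicates referenced by the three Queue member functions above
 * (consumers_activate_, suppliers_activate_) are not shown in these snippets.
 * A plausible sketch, purely as an assumption about their intent: consumers may
 * proceed once the queue is deactivated or non-empty, and suppliers once the
 * queue is deactivated or has drained to an assumed high_water_mark_ member.
 */
bool consumers_activate_() const
{ return !active_ || !empty_(); }

bool suppliers_activate_() const
{ return !active_ || queue_.size() <= high_water_mark_; }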
void resultCallback(const hector_move_base_msgs::MoveBaseActionResult& move_base_result)
{
    if ((move_base_result.status.goal_id.id == target_path_goal_.goal_id.id) &&
        (move_base_result.status.goal_id.stamp == target_path_goal_.goal_id.stamp))
    {
        target_path_result_ = move_base_result;
        condition_path_ready_.notify_one();
    }
}
void f()
{
    (*msg)();
    done = true;
    c.notify_one();
}
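/*
 * Hypothetical waiter paired with f() above; a minimal sketch assuming 'done'
 * and the condition variable 'c' are guarded by a boost::mutex 'm' (the mutex
 * name and this function are illustrations, not part of the original snippet).
 * For the handshake to be race-free, f() would also need to set 'done' while
 * holding the same mutex.
 */
void wait_for_f()
{
    boost::unique_lock<boost::mutex> lock(m);
    while (!done)
        c.wait(lock);   // woken by f()'s notify_one()
}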
void sort_()
{
    uint64_t count = 0;
    while (count < count_)
    {
        uint32_t pre_buf_size = 0;
        uint32_t pre_buf_num = 0;

        {
            boost::mutex::scoped_lock lock(pre_buf_mtx_);
            // Wait until the prefetching thread has filled the prefetch buffer.
            while (pre_buf_size_ == 0)
                pre_buf_con_.wait(lock);

            assert(pre_buf_size_ <= RUN_BUF_SIZE_);
            memcpy(run_buf_, pre_buf_, pre_buf_size_);
            pre_buf_size = pre_buf_size_;
            pre_buf_num = pre_buf_num_;
            count += pre_buf_num_;
            pre_buf_num_ = pre_buf_size_ = 0;
            // Signal the prefetching thread that the prefetch buffer is free again.
            pre_buf_con_.notify_one();
        }

        // Build a key pointer for every record in the run buffer.
        key_buf_ = (struct KEY_PTR*)realloc(key_buf_, pre_buf_num * sizeof(struct KEY_PTR));
        uint32_t pos = 0;
        for (uint32_t i = 0; i < pre_buf_num; ++i)
        {
            key_buf_[i] = KEY_PTR(pos);
            assert(pos <= RUN_BUF_SIZE_);
            pos += *(LEN_TYPE*)(run_buf_ + pos) + sizeof(LEN_TYPE);
            IASSERT(pos <= pre_buf_size);
        }
        IASSERT(pos == pre_buf_size);

        quick_sort_(0, pre_buf_num - 1, pre_buf_num);

        boost::mutex::scoped_lock lock(out_buf_mtx_);
        // Wait until the output thread has drained the output buffer.
        while (out_buf_size_ != 0)
            out_buf_con_.wait(lock);

        out_buf_size_ = 0;
        out_buf_num_ = 0;
        LEN_TYPE max_len_of_this_run = (LEN_TYPE)0;
        // Copy the records into the output buffer in sorted order.
        for (uint32_t i = 0; i < pre_buf_num; ++i, ++out_buf_num_)
        {
            assert(key_buf_[i].pos <= RUN_BUF_SIZE_);
            assert(out_buf_size_ + key_buf_[i].LEN(run_buf_) + sizeof(LEN_TYPE) <= RUN_BUF_SIZE_);
            memcpy(out_buf_ + out_buf_size_, run_buf_ + key_buf_[i].pos,
                   key_buf_[i].LEN(run_buf_) + sizeof(LEN_TYPE));
            LEN_TYPE len = key_buf_[i].LEN(run_buf_) + sizeof(LEN_TYPE);
            out_buf_size_ += len;
            if (len > max_len_of_this_run)
                max_len_of_this_run = len;
        }

        max_record_len_of_this_run_ = max_len_of_this_run;
        min_run_buff_size_for_merger_ += max_record_len_of_this_run_;
        if (max_len_of_this_run > max_record_len_)
            max_record_len_ = (uint32_t)max_len_of_this_run;

        IASSERT(out_buf_num_ == pre_buf_num);
        IASSERT(out_buf_size_ == pre_buf_size);
        // Signal the output thread that a sorted run is ready.
        out_buf_con_.notify_one();
    }
    std::cout << "Sorting is over...\n";
}
void prefetch_(FILE* f)
{
    IO_TYPE ioStream(f);
    const uint64_t FILE_LEN = ioStream.length();
    run_num_ = 0;
    uint64_t pos = sizeof(uint64_t);
    std::cout << std::endl;
    ioStream.seek(pos);

    while (pos < FILE_LEN)
    {
        std::cout << "\rA runner is processing " << pos * 1. / FILE_LEN << std::flush;
        ++run_num_;

        boost::mutex::scoped_lock lock(pre_buf_mtx_);
        // Wait until the sorting thread has consumed the previous prefetch buffer.
        while (pre_buf_size_ != 0)
            pre_buf_con_.wait(lock);

        //uint32_t s = (uint32_t)(FILE_LEN-pos>RUN_BUF_SIZE_? RUN_BUF_SIZE_: FILE_LEN-pos);
        uint32_t s;
        if (!ioStream.isCompression())
            s = (uint32_t)(FILE_LEN - pos > RUN_BUF_SIZE_ ? RUN_BUF_SIZE_ : FILE_LEN - pos);
        else
            s = RUN_BUF_SIZE_;
        //std::cout<<std::endl<<pos<<"-"<<FILE_LEN<<"-"<<RUN_BUF_SIZE_<<"-"<<s<<std::endl;

        if (!ioStream.isCompression())
            ioStream.seek(pos);
        //IASSERT(fread(pre_buf_, s, 1, f)==1);
        s = ioStream.read(pre_buf_, s);

        if (!ioStream.isCompression())
            pos += (uint64_t)s;
        else
            pos = ioStream.tell();

        // Find the end of the last complete record in the buffer.
        pre_buf_size_ = 0;
        pre_buf_num_ = 0;
        for (; pre_buf_size_ < s; ++pre_buf_num_)
        {
            if (pre_buf_size_ + *(LEN_TYPE*)(pre_buf_ + pre_buf_size_) + sizeof(LEN_TYPE) > s)
                break;
            pre_buf_size_ += *(LEN_TYPE*)(pre_buf_ + pre_buf_size_) + sizeof(LEN_TYPE);
        }
        // Back pos up over the trailing incomplete record so it is read again next time.
        pos -= (uint64_t)(s - pre_buf_size_);
        //std::cout<<"pre_buf_size_ "<<pre_buf_size_<<" pre_buf_num_ "<<pre_buf_num_<<" ret "<<s<<" pos "<<pos<<std::endl;

        if (pre_buf_num_ == 0)
        {
            // No complete record fit into the buffer: grow the run buffer before the next read.
            std::cout << "\n[Warning]: A record is too long, and has been ignored!\n";
            //pos += *(LEN_TYPE*)(pre_buf_+pre_buf_size_) + sizeof(LEN_TYPE);
            --count_;
            RUN_BUF_SIZE_ = (uint32_t)((*(LEN_TYPE*)(pre_buf_ + pre_buf_size_) + sizeof(LEN_TYPE)) * 1.1);
            pre_buf_ = (char*)realloc(pre_buf_, RUN_BUF_SIZE_);
            continue;
        }

        //IASSERT(pre_buf_size_ <= RUN_BUF_SIZE_);
        // Signal the sorting thread that the prefetch buffer is ready.
        pre_buf_con_.notify_one();
    }
    std::cout << "Prefetching is over...\n";
}
inline void boost_threaded_monitor::notify() { cond_.notify_one(); }
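/*
 * Hypothetical wait() counterpart to boost_threaded_monitor::notify() above,
 * sketching the usual monitor pairing: the caller blocks on cond_ under the
 * monitor's lock until notify() wakes it. The lock parameter and this function
 * are assumptions; only cond_ comes from the snippet.
 */
inline void boost_threaded_monitor::wait(boost::unique_lock<boost::mutex>& lock)
{
    cond_.wait(lock);   // returns after notify() calls cond_.notify_one()
}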