Example #1
void SyncSession::advance_state(std::unique_lock<std::mutex>& lock, const State& state)
{
    REALM_ASSERT(lock.owns_lock());
    REALM_ASSERT(&state != m_state);
    m_state = &state;
    m_state->enter_state(lock, *this);
}
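Most of the examples on this page share the same convention: the caller acquires the std::unique_lock and passes it by reference, and the callee asserts owns_lock() as a precondition instead of locking again. A minimal self-contained sketch of that convention (hypothetical Counter class, not taken from any of the projects shown here):

#include <cassert>
#include <mutex>

class Counter {
public:
    void increment() {
        std::unique_lock<std::mutex> lock(m_mutex); // the caller takes the lock...
        increment_locked(lock);                     // ...and passes it to the helper
    }

private:
    // Precondition: lock is held on m_mutex.
    void increment_locked(std::unique_lock<std::mutex>& lock) {
        assert(lock.owns_lock());
        ++m_value;
    }

    std::mutex m_mutex;
    int m_value = 0;
};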
Example #2
bool io_service_thread_pool::run(std::unique_lock<compat::mutex>& l,
    std::size_t num_threads)
{
    HPX_ASSERT(l.owns_lock());
    compat::barrier startup(1);
    return threads_.run(num_threads, false, &startup);
}
Example #3
void SyncSession::unregister(std::unique_lock<std::mutex>& lock)
{
    REALM_ASSERT(lock.owns_lock());
    REALM_ASSERT(m_state == &State::inactive); // Must stop an active session before unregistering.

    lock.unlock();
    SyncManager::shared().unregister_session(m_realm_path);
}
Example #4
void Solver::proceed(std::unique_lock<std::mutex>& lock)
{
	assert(lock.mutex() == &workerMutex && lock.owns_lock());
	wakeMainThreadRequested = true;
	wakeMainThread.notify_one();
	wakeWorkerThread.wait(lock, [&]() { return wakeWorkerThreadRequested; });
	wakeWorkerThreadRequested = false;
}
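Solver::proceed wakes the main thread and then blocks on the worker condition variable until it is woken back up. A minimal self-contained sketch of that notify-then-wait handshake, simplified to a single condition variable and hypothetical flag names (not the Solver's actual members):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int main() {
    std::mutex m;
    std::condition_variable cv;
    bool mainTurn = false;
    bool workerTurn = false;

    std::thread worker([&] {
        std::unique_lock<std::mutex> lock(m);
        // Hand control to the main thread, then wait until it hands it back.
        mainTurn = true;
        cv.notify_one();
        cv.wait(lock, [&] { return workerTurn; });
        std::cout << "worker resumed\n";
    });

    {
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [&] { return mainTurn; });
        workerTurn = true;
        cv.notify_one();
    }
    worker.join();
}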
Example #5
	// checks to see if we're no longer exceeding the high watermark,
	// and if we're in fact below the low watermark. If so, we need to
	// post the notification messages to the peers that are waiting for
	// more buffers to receive data into
	void disk_buffer_pool::check_buffer_level(std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(l.owns_lock());
		if (!m_exceeded_max_size || m_in_use > m_low_watermark) return;

		m_exceeded_max_size = false;

		std::vector<std::weak_ptr<disk_observer>> cbs;
		m_observers.swap(cbs);
		l.unlock();
		m_ios.post(std::bind(&watermark_callback, std::move(cbs)));
	}
Example #6
void DinicVertexLayerP::clearJobs(std::unique_lock<std::mutex>& lock, bool check)
{
	// take the lock if the caller does not already hold it
	if (!lock.owns_lock())
	{
		lock.lock();
	}
	// discard the finished jobs, move the queued jobs into the active list,
	// and reset their "run" flags
	jobs.clear();
	jobs.swap(queueJobs);
	runJobs = vector<bool>(jobs.size(), false);
	assert(queueJobs.empty());
	if (check) assert(jobs.empty());
}
Example #7
	void disk_buffer_pool::free_buffer_impl(char* buf, std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_UNUSED(l);

		page_free(buf);

		--m_in_use;
	}
Example #8
    // schedule a high priority task after a given time interval
    void interval_timer::schedule_thread(std::unique_lock<mutex_type> & l)
    {
        HPX_ASSERT(l.owns_lock());

        using namespace hpx::threads;

        error_code ec;

        // create a new suspended thread
        threads::thread_id_type id;
        {
            // FIXME: registering threads might lead to thread suspension since
            // the allocators use hpx::lcos::local::spinlock. Unlocking the
            // lock here would be the right thing but leads to crashes and hangs
            // at shutdown.
            //util::unlock_guard<std::unique_lock<mutex_type> > ul(l);
            id = hpx::applier::register_thread_plain(
                util::bind(&interval_timer::evaluate,
                    this->shared_from_this(), util::placeholders::_1),
                description_.c_str(), threads::suspended, true,
                threads::thread_priority_boost, std::size_t(-1),
                threads::thread_stacksize_default, ec);
        }

        if (ec) {
            is_terminated_ = true;
            is_started_ = false;
            return;
        }

        // schedule this thread to be run after the given amount of seconds
        threads::set_thread_state(id,
            boost::chrono::microseconds(microsecs_),
            threads::pending, threads::wait_signaled,
            threads::thread_priority_boost, ec);

        if (ec) {
            is_terminated_ = true;
            is_started_ = false;

            // abort the newly created thread
            threads::set_thread_state(id, threads::pending, threads::wait_abort,
                threads::thread_priority_boost, ec);

            return;
        }

        id_ = id;
        is_started_ = true;
    }
Example #9
void FunctionScheduler::addFunctionToHeap(
    const std::unique_lock<std::mutex>& lock,
    RepeatFunc&& func) {
  // This function should only be called with mutex_ already locked.
  DCHECK(lock.mutex() == &mutex_);
  DCHECK(lock.owns_lock());

  functions_.emplace_back(std::move(func));
  if (running_) {
    functions_.back().resetNextRunTime(steady_clock::now());
    std::push_heap(functions_.begin(), functions_.end(), fnCmp_);
    // Signal the running thread to wake up and see if it needs to change
    // its current scheduling decision.
    runningCondvar_.notify_one();
  }
}
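Here the lock is taken by const reference and never locked or unlocked; the parameter only serves as proof that the caller already holds mutex_, which is why both the mutex identity and owns_lock() are checked. A minimal sketch of that "lock witness" idiom (hypothetical TaskList class, not Folly code):

#include <cassert>
#include <mutex>
#include <vector>

class TaskList {
public:
    void add(int task) {
        std::unique_lock<std::mutex> lock(mutex_);
        addLocked(lock, task);
    }

private:
    void addLocked(const std::unique_lock<std::mutex>& lock, int task) {
        assert(lock.mutex() == &mutex_); // the right mutex...
        assert(lock.owns_lock());        // ...and actually held
        tasks_.push_back(task);
    }

    std::mutex mutex_;
    std::vector<int> tasks_;
};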
Example #10
	bool disk_buffer_pool::is_disk_buffer(char* buffer
		, std::unique_lock<std::mutex>& l) const
	{
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_UNUSED(l);

#if TORRENT_USE_INVARIANT_CHECKS
		return m_buffers_in_use.count(buffer) == 1;
#elif defined TORRENT_DEBUG_BUFFERS
		return page_in_use(buffer);
#else
		TORRENT_UNUSED(buffer);
		return true;
#endif
	}
Example #11
void CacheHost::releaseItem(Cacheable *item,
		std::unique_lock<std::mutex> &lock) {
	assert(lock.owns_lock());
	
	assert(item != &p_sentinel);
	item->p_alive = false;
	p_activeFootprint -= item->getFootprint();

	// remove the item from the list
	Cacheable *less_recently = item->p_lessRecentlyUsed;
	Cacheable *more_recently = item->p_moreRecentlyUsed;
	less_recently->p_moreRecentlyUsed = more_recently;
	more_recently->p_lessRecentlyUsed = less_recently;

	lock.unlock();
	item->release();
	lock.lock();
}
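Like Example #3, releaseItem drops the lock before calling out (here item->release()) so that external code never runs while the internal mutex is held, and then re-locks to restore the caller's expectation. A self-contained sketch of that unlock-call-relock pattern, with hypothetical names:

#include <cassert>
#include <functional>
#include <iostream>
#include <mutex>

class Registry {
public:
    void setCallback(std::function<void()> cb) {
        std::unique_lock<std::mutex> lock(mutex_);
        callback_ = std::move(cb);
    }

    void notify() {
        std::unique_lock<std::mutex> lock(mutex_);
        notifyLocked(lock);
    }

private:
    void notifyLocked(std::unique_lock<std::mutex>& lock) {
        assert(lock.owns_lock());
        std::function<void()> cb = callback_;  // copy state while locked
        lock.unlock();                         // never run user code under the mutex
        if (cb) cb();
        lock.lock();                           // restore the caller's invariant
    }

    std::mutex mutex_;
    std::function<void()> callback_;
};

int main() {
    Registry r;
    r.setCallback([] { std::cout << "callback invoked without holding the mutex\n"; });
    r.notify();
}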
Example #12
        bool add_new_always(std::size_t& added, thread_queue* addfrom,
            std::unique_lock<mutex_type> &lk, bool steal = false)
        {
            HPX_ASSERT(lk.owns_lock());

#ifdef HPX_HAVE_THREAD_CREATION_AND_CLEANUP_RATES
            util::tick_counter tc(add_new_time_);
#endif

            if (0 == addfrom->new_tasks_count_.load(boost::memory_order_relaxed))
                return false;

            // create new threads from pending tasks (if appropriate)
            boost::int64_t add_count = -1;                  // default is no constraint

            // if we are desperate (no work in the queues), add some even if the
            // map holds more than max_count
            if (HPX_LIKELY(max_count_)) {
                std::size_t count = thread_map_.size();
                if (max_count_ >= count + min_add_new_count) { //-V104
                    HPX_ASSERT(max_count_ - count <
                        static_cast<std::size_t>((std::numeric_limits
                            <boost::int64_t>::max)()));
                    add_count = static_cast<boost::int64_t>(max_count_ - count);
                    if (add_count < min_add_new_count)
                        add_count = min_add_new_count;
                    if (add_count > max_add_new_count)
                        add_count = max_add_new_count;
                }
                else if (work_items_.empty()) {
                    add_count = min_add_new_count;    // add this number of threads
                    max_count_ += min_add_new_count;  // increase max_count //-V101
                }
                else {
                    return false;
                }
            }

            std::size_t addednew = add_new(add_count, addfrom, lk, steal);
            added += addednew;
            return addednew != 0;
        }
Example #13
void FunctionScheduler::cancelFunction(const std::unique_lock<std::mutex>& l,
                                       FunctionHeap::iterator it) {
  // This function should only be called with mutex_ already locked.
  DCHECK(l.mutex() == &mutex_);
  DCHECK(l.owns_lock());

  if (running_) {
    // Internally gcc has an __adjust_heap() function to fill in a hole in the
    // heap.  Unfortunately it isn't part of the standard API.
    //
    // For now we just leave the RepeatFunc in our heap, but mark it as unused.
    // When its nextTimeInterval comes up, the runner thread will pop it from
    // the heap and simply throw it away.
    it->cancel();
  } else {
    // We're not running, so functions_ doesn't need to be maintained in heap
    // order.
    functions_.erase(it);
  }
}
Example #14
void MultiTaskScheduler::resume(Task* task, std::unique_lock<Spinlock> lock) {
    assert(lock.owns_lock());
    assert(task->status == Starting || task->status == Listening || task->status == Suspended);
    assert(!task->scheduled);
    TaskStatus status = task->status;
    task->resumes += 1;
    task->scheduled = true;
    lock.unlock();

    if (status == Starting || status == Listening) {
        std::unique_lock<Spinlock> lock(softMutex);
        softTasks.push_front(task);
    } else if (status == Suspended) {
        std::unique_lock<Spinlock> lock(hardMutex);
        hardTasks.push_front(task);
    } else {
        // Impossible.
        __builtin_unreachable();
    }
}
Example #15
        bool add_new_if_possible(std::size_t& added, thread_queue* addfrom,
            std::unique_lock<mutex_type> &lk, bool steal = false)
        {
            HPX_ASSERT(lk.owns_lock());

#ifdef HPX_HAVE_THREAD_CREATION_AND_CLEANUP_RATES
            util::tick_counter tc(add_new_time_);
#endif

            if (0 == addfrom->new_tasks_count_.load(boost::memory_order_relaxed))
                return false;

            // create new threads from pending tasks (if appropriate)
            boost::int64_t add_count = -1;                  // default is no constraint

            // if the map doesn't hold max_count threads yet add some
            // FIXME: why do we have this test? can max_count_ ever be zero?
            if (HPX_LIKELY(max_count_)) {
                std::size_t count = thread_map_.size();
                if (max_count_ >= count + min_add_new_count) { //-V104
                    HPX_ASSERT(max_count_ - count <
                        static_cast<std::size_t>((std::numeric_limits
                            <boost::int64_t>::max)()));
                    add_count = static_cast<boost::int64_t>(max_count_ - count);
                    if (add_count < min_add_new_count)
                        add_count = min_add_new_count;
                }
                else {
                    return false;
                }
            }

            std::size_t addednew = add_new(add_count, addfrom, lk, steal);
            added += addednew;
            return addednew != 0;
        }
Example #16
        ///////////////////////////////////////////////////////////////////////
        // add new threads if there is some amount of work available
        std::size_t add_new(boost::int64_t add_count, thread_queue* addfrom,
            std::unique_lock<mutex_type> &lk, bool steal = false)
        {
            HPX_ASSERT(lk.owns_lock());

            if (HPX_UNLIKELY(0 == add_count))
                return 0;

            std::size_t added = 0;
            task_description* task = 0;
            while (add_count-- && addfrom->new_tasks_.pop(task, steal))
            {
#ifdef HPX_HAVE_THREAD_QUEUE_WAITTIME
                if (maintain_queue_wait_times) {
                    addfrom->new_tasks_wait_ +=
                        util::high_resolution_clock::now() - util::get<2>(*task);
                    ++addfrom->new_tasks_wait_count_;
                }
#endif
                --addfrom->new_tasks_count_;

                // measure thread creation time
                util::block_profiler_wrapper<add_new_tag> bp(add_new_logger_);

                // create the new thread
                threads::thread_init_data& data = util::get<0>(*task);
                thread_state_enum state = util::get<1>(*task);
                threads::thread_id_type thrd;

                create_thread_object(thrd, data, state, lk);

                delete task;

                // add the new entry to the map of all threads
                std::pair<thread_map_type::iterator, bool> p =
                    thread_map_.insert(thrd);

                if (HPX_UNLIKELY(!p.second)) {
                    HPX_THROW_EXCEPTION(hpx::out_of_memory,
                        "threadmanager::add_new",
                        "Couldn't add new thread to the thread map");
                    return 0;
                }
                ++thread_map_count_;

                // only insert the thread into the work-items queue if it is in
                // pending state
                if (state == pending) {
                    // pushing the new thread into the pending queue of the
                    // specified thread_queue
                    ++added;
                    schedule_thread(thrd.get());
                }

                // this thread has to be in the map now
                HPX_ASSERT(thread_map_.find(thrd.get()) != thread_map_.end());
                HPX_ASSERT(thrd->get_pool() == &memory_pool_);
            }

            if (added) {
                LTM_(debug) << "add_new: added " << added << " tasks to queues"; //-V128
            }
            return added;
        }
Example #17
void FunctionScheduler::runOneFunction(std::unique_lock<std::mutex>& lock,
                                       steady_clock::time_point now) {
  DCHECK(lock.mutex() == &mutex_);
  DCHECK(lock.owns_lock());

  // The function to run will be at the end of functions_ already.
  //
  // Fully remove it from functions_ now.
  // We need to release mutex_ while we invoke this function, and we need to
  // maintain the heap property on functions_ while mutex_ is unlocked.
  RepeatFunc func(std::move(functions_.back()));
  functions_.pop_back();
  if (!func.cb) {
    VLOG(5) << func.name << " function has been canceled while waiting";
    return;
  }
  currentFunction_ = &func;

  // Update the function's next run time.
  if (steady_) {
    // This allows scheduler to catch up
    func.setNextRunTimeSteady();
  } else {
    // Note that we set nextRunTime based on the current time where we started
    // the function call, rather than the time when the function finishes.
    // This ensures that we call the function once every time interval, as
    // opposed to waiting time interval seconds between calls.  (These can be
    // different if the function takes a significant amount of time to run.)
    func.setNextRunTimeStrict(now);
  }

  // Release the lock while we invoke the user's function
  lock.unlock();

  // Invoke the function
  try {
    VLOG(5) << "Now running " << func.name;
    func.cb();
  } catch (const std::exception& ex) {
    LOG(ERROR) << "Error running the scheduled function <"
      << func.name << ">: " << exceptionStr(ex);
  }

  // Re-acquire the lock
  lock.lock();

  if (!currentFunction_) {
    // The function was cancelled while we were running it.
    // We shouldn't reschedule it.
    return;
  }
  // Clear currentFunction_
  CHECK_EQ(currentFunction_, &func);
  currentFunction_ = nullptr;

  // Re-insert the function into our functions_ heap.
  // We only maintain the heap property while running_ is set.  (running_ may
  // have been cleared while we were invoking the user's function.)
  functions_.push_back(std::move(func));
  if (running_) {
    std::push_heap(functions_.begin(), functions_.end(), fnCmp_);
  }
}
Example #18
File: main.cpp Project: CCJY/coliru
void call_locked(const std::unique_lock<std::mutex>& A) {
    if(!A.owns_lock()) // possible with e.g. std::defer_lock
        throw std::logic_error("Not locked you jarhead!");
        
    std::cout << "call_locked: A is locked\n";
}
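A short caller sketch for the function above (hypothetical main, with the headers call_locked itself would need), showing how a lock constructed with std::defer_lock fails the owns_lock() check:

#include <iostream>
#include <mutex>
#include <stdexcept>

int main() {
    std::mutex m;

    std::unique_lock<std::mutex> held(m);
    call_locked(held);              // fine: the lock is owned

    held.unlock();
    std::unique_lock<std::mutex> deferred(m, std::defer_lock);
    try {
        call_locked(deferred);      // throws: deferred does not own the mutex
    } catch (const std::logic_error& e) {
        std::cout << "caught: " << e.what() << '\n';
    }
}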