Example #1
 void ThreadLoop() {
   while (go_on) {
     {
       std::unique_lock<std::mutex> lock(mutex);
       auto now = std::chrono::system_clock::now();
       auto cur = waiting_tasks.begin();
       // Run a runnable task
       if (waiting_tasks.empty() == false && (*cur).getTime() <= now
           && (*cur).isCancelled() == false) {
         auto ins = running_tasks.insert(*cur);
         waiting_tasks.erase(*cur);
         cur = ins.first;
         (*cur).go();  // Callback will be launched in a detached thread
         running_tasks.erase(*cur);
       }
       // waiting for the next event
       if (waiting_tasks.empty()) {
         blocker.wait(lock);
       } else {
         /*
          * Bad: Valgrind complains "invalid read of size 8". After the
          * insert/erase above, `cur` may no longer point at a live element,
          * so dereferencing it here reads freed memory:
          * http://stackoverflow.com/questions/9891767/valgrind-debug-log-invalid-read-of-size-8
          *
          * blocker.wait_until(lock, (*cur).getTime());
          */
         auto nxt = waiting_tasks.begin();
         auto nxttime = (*nxt).getTime();
         blocker.wait_until(lock, nxttime);
       }
     }
   }
 }
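The commented-out call above is the source of the Valgrind "invalid read": after the insert/erase sequence, cur may no longer refer to a live element, which is why the fixed code re-reads waiting_tasks.begin() and copies its time into a local before sleeping. A tiny self-contained illustration of that invalidation hazard, using a plain std::set<int> as a stand-in for the task containers:

#include <set>

int main()
{
    std::set<int> tasks = {1, 2, 3};
    auto cur = tasks.begin();     // points at 1
    tasks.erase(cur);             // cur is now invalidated
    // int bad = *cur;            // invalid read, just like (*cur).getTime() above
    auto nxt = tasks.begin();     // re-fetch the iterator instead, as the example does
    return *nxt == 2 ? 0 : 1;
}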
Example #2
void timedwait_monotonic(int id)
{
    std::unique_lock<std::mutex> lock(s_mutex);

    //
    // http://en.cppreference.com/w/cpp/thread/condition_variable/wait_until
    //
    // The clock tied to timeout_time is used, which is not required to be a
    // monotonic clock. There are no guarantees regarding the behavior of this
    // function if the clock is adjusted discontinuously, but the existing
    // implementations convert timeout_time from Clock to std::chrono::system_clock
    // and delegate to POSIX pthread_cond_timedwait, so the wait honors
    // adjustments to the system clock, but not to the user-provided Clock.
    // In any case, the function may also wait longer than timeout_time
    // due to scheduling or resource contention delays.
    //
    auto start = std::chrono::steady_clock::now();
    std::cv_status ret = s_cond.wait_until(lock, start + std::chrono::seconds(5));

    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> duration = end - start;

    if (ret == std::cv_status::timeout)
    {
        std::cout << "t" << id << " timeout: " << duration.count() << std::endl;
    }
    else
    {
        std::cout << "t" << id << " success: " << duration.count() << std::endl;
    }
}
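The comment above (quoting cppreference) is the reason this variant measures elapsed time with std::chrono::steady_clock even though the wait itself may internally be tracked against the system clock. Below is a minimal, self-contained sketch of the same timed-wait pattern; the local names (m, cond, ready) and the one-second notifier thread are assumptions added for illustration, not part of the original example.

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int main()
{
    std::mutex m;
    std::condition_variable cond;
    bool ready = false;

    // Hypothetical notifier: wakes the waiter after roughly one second.
    std::thread notifier([&] {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        { std::lock_guard<std::mutex> lk(m); ready = true; }
        cond.notify_one();
    });

    {
        std::unique_lock<std::mutex> lock(m);
        auto deadline = std::chrono::steady_clock::now() + std::chrono::seconds(5);
        // Loop to absorb spurious wakeups; the predicate overload of wait_until
        // would do this re-checking internally.
        while (!ready) {
            if (cond.wait_until(lock, deadline) == std::cv_status::timeout)
                break;
        }
        std::cout << (ready ? "success" : "timeout") << std::endl;
    }

    notifier.join();
    return 0;
}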
Example #3
        void run() {
            {
                std::lock_guard<std::mutex> lk(mutex);
                if (running) return;
                running = true;
                for (auto& t : threads) {
                    t = std::thread([this]{this->loop();});
                }
            }
            while (true) {
                std::unique_lock<std::mutex> lk(mutex);
                if (!running) break;

                auto task_it = std::min_element(tasks.begin(), tasks.end());
                time_point next_task = task_it == tasks.end() ? clock_type::time_point::max() : task_it->start;
                if (tasks_updated.wait_until(lk, next_task) == std::cv_status::timeout) {
                    if (task_it == tasks.end())
                        continue;  // timed out with nothing scheduled; go back and re-check
                    if (task_it->repeat != clock_type::duration::zero()) {
                        task_it->start += task_it->repeat;
                    }
                    else {
                        handles.remove(task_it);
                        tasks.erase(task_it);
                    }
                    todo.push_back(task_it);
                    modified.notify_all();
                }
            }
            for (auto& t : threads) {
                t.join();
            }
        }
Example #4
File: main.cpp  Project: WhiZTiM/coliru
void waits(int idx)
{
    std::unique_lock<std::mutex> lk(cv_m);
    auto now = std::chrono::system_clock::now();
    if(cv.wait_until(lk, now + std::chrono::milliseconds(idx*100), [](){return i == 1;}))
        std::cerr << "Thread " << idx << " finished waiting. i == " << i << '\n';
    else
        std::cerr << "Thread " << idx << " timed out. i == " << i << '\n';
}
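Example #4 uses the predicate overload of wait_until, which returns a bool (whether the predicate held before the deadline) instead of a std::cv_status. For completeness, a possible signaling side is sketched below; it assumes the same globals as the snippet (std::condition_variable cv; std::mutex cv_m; int i = 0;), and the sleep lengths are arbitrary.

// Hypothetical counterpart to waits(): makes the predicate i == 1 true and
// notifies all waiters. Assumes the globals cv, cv_m and i used above, plus
// <chrono>, <condition_variable>, <iostream>, <mutex> and <thread>.
void signals()
{
    std::this_thread::sleep_for(std::chrono::milliseconds(120));
    std::cerr << "Notifying...\n";
    cv.notify_all();                  // i is still 0, so predicate waits keep waiting
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    {
        std::lock_guard<std::mutex> lk(cv_m);
        i = 1;                        // make the predicate true before notifying again
    }
    std::cerr << "Notifying again...\n";
    cv.notify_all();
}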
Example #5
bool wait_loop() {
    const auto timeout =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(500);
    std::unique_lock<std::mutex> lk(mtx);
    while (!done) {
        if (cv.wait_until(lk, timeout) == std::cv_status::timeout)
            break;
    }
    return done;
}
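Note that Example #5 computes the deadline once, before the loop, so repeated spurious wakeups cannot stretch the total wait beyond the 500 ms budget (looping on wait_for with a relative duration would not have that property). A sketch of the equivalent form using the predicate overload, assuming the same mtx, cv and done globals as above:

// Equivalent to wait_loop() above, letting wait_until re-check the predicate
// internally instead of looping by hand. Assumes the globals mtx, cv and done.
bool wait_loop_pred() {
    const auto timeout =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(500);
    std::unique_lock<std::mutex> lk(mtx);
    return cv.wait_until(lk, timeout, [] { return done; });
}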
Example #6
 SearchReply::UP getReply(uint32_t millis) {
     std::unique_lock<std::mutex> guard(_lock);
     auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(millis);
     while (!_reply) {
         if (_cond.wait_until(guard, deadline) == std::cv_status::timeout) {
             break;
         }
     }
     return std::move(_reply);
 }
Example #7
int fakeSocketPoll(struct pollfd *pollfds, int nfds, int timeout)
{
    loggingBuffer << "FakeSocket Poll ";
    for (int i = 0; i < nfds; i++)
    {
        if (i > 0)
            loggingBuffer << ",";
        loggingBuffer << "#" << pollfds[i].fd << ":" << pollBits(pollfds[i].events);
    }
    loggingBuffer << ", timeout:" << timeout << flush();

    std::vector<FakeSocketPair>& fds = getFds();
    std::unique_lock<std::mutex> lock(theMutex);

    if (timeout > 0)
    {
        auto const now = std::chrono::steady_clock::now();
        auto const end = now + std::chrono::milliseconds(timeout);

        while (!checkForPoll(fds, pollfds, nfds))
            if (theCV.wait_until(lock, end) == std::cv_status::timeout)
            {
                loggingBuffer << "FakeSocket Poll timeout: 0" << flush();
                return 0;
            }
    }
    else if (timeout == 0)
    {
        checkForPoll(fds, pollfds, nfds);
    }
    else // timeout < 0
    {
        while (!checkForPoll(fds, pollfds, nfds))
            theCV.wait(lock);
    }

    int result = 0;
    for (int i = 0; i < nfds; i++)
    {
        if (pollfds[i].revents != 0)
            result++;
    }

    loggingBuffer << "FakeSocket Poll result: ";
    for (int i = 0; i < nfds; i++)
    {
        if (i > 0)
            loggingBuffer << ",";
        loggingBuffer << "#" << pollfds[i].fd << ":" << pollBits(pollfds[i].revents);
    }
    loggingBuffer << ": " << result << flush();

    return result;
}
Example #8
 std::cv_status wait_until(const std::chrono::duration<Rep, Period>& abs_time)
 {
     auto lc = capo::make<std::unique_lock>(lock_);
     while (counter_ <= 0)
     {
         if (cond_.wait_until(lc, abs_time) == std::cv_status::timeout)
             return std::cv_status::timeout;
     }
     -- counter_;
     return std::cv_status::no_timeout;
 }
Example #9
 std::cv_status wait_until(const std::chrono::duration<Rep, Period>& abs_time)
 {
     auto lc = capo::make<std::unique_lock>(lock_);
     while (signaled_ == waiter_status::resting)
     {
         if (cond_.wait_until(lc, abs_time) == std::cv_status::timeout)
             return std::cv_status::timeout;
     }
     if (signaled_ == waiter_status::arrived)
         signaled_ = waiter_status::resting;
     return std::cv_status::no_timeout;
 }
Example #10
File: main.cpp  Project: CCJY/coliru
 void operator()() const {
     std::unique_lock<std::mutex> lk(_mx);
     _cv.wait_until(lk, _deadline, [this] 
             { 
                 std::cout << "worker: Signaled\n";
                 auto now = our_clock::now();
                 if (now >= _deadline)
                     return true;
                 std::cout << "worker: Still waiting " << chrono::duration_cast<chrono::milliseconds>(_deadline - now).count() << "ms...\n"; 
                 return false;
             });
     std::cout << "worker: Done\n";
 }
Example #11
	bool fiber_waiter::wait_ready(std::chrono::steady_clock::time_point timeout_point) noexcept
	{
		if (gth_thread_type == thread_type::thread)
		{
			std::unique_lock<std::mutex> lk(m_thread_mutex);
			return m_thread_var.wait_until(lk, timeout_point, [this] { return m_ready.load(std::memory_order_relaxed); });
		}
		else
		{
			std::unique_lock<boost::fibers::mutex> lk(m_fiber_mutex);
			return m_fiber_var.wait_until(lk, timeout_point, [this] { return m_ready.load(std::memory_order_relaxed); });
		}
	}
Example #12
File: synch.hpp  Project: saleyn/utxx
 int wait
 (
     const std::chrono::time_point<Clock, Duration>& wait_until_abs_time,
     long* old_val = NULL
 ) {
     if (old_val) {
         long cur_val = m_count.load(std::memory_order_relaxed);
         if (*old_val != cur_val) {
             *old_val = cur_val;
             return 0;
         }
     }
     std::unique_lock<std::mutex> g(m_lock);
     return m_cond.wait_until(g, wait_until_abs_time) == std::cv_status::no_timeout
          ? 0 : ETIMEDOUT;
 }
Example #13
void timedwait(int id)
{
    std::unique_lock<std::mutex> lock(s_mutex);

    auto start = std::chrono::system_clock::now();
    std::cv_status ret = s_cond.wait_until(lock, start + std::chrono::seconds(5));

    auto end = std::chrono::system_clock::now();
    std::chrono::duration<double> delta = end - start;

    if (ret == std::cv_status::timeout)
    {
        std::cout << "t" << id << " timeout: " << delta.count() << std::endl;
    }
    else
    {
        std::cout << "t" << id << " success: " << delta.count() << std::endl;
    }
}
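timedwait() is the std::chrono::system_clock counterpart of timedwait_monotonic() shown earlier; running both and comparing the printed durations makes clock adjustments visible, because stepping the system clock during the wait changes what the system_clock-based delta reports. A possible driver is sketched below; the thread count, the one-second sleep and notifying only one waiter are assumptions, and s_mutex/s_cond are taken to be the globals used by both functions.

// Hypothetical driver: one waiter is woken early, the other runs into the
// 5-second timeout. Assumes the global std::mutex s_mutex and
// std::condition_variable s_cond used by the two functions above.
int main()
{
    std::thread t1(timedwait, 1);
    std::thread t2(timedwait_monotonic, 2);

    std::this_thread::sleep_for(std::chrono::seconds(1));
    s_cond.notify_one();   // wakes exactly one of the two waiters

    t1.join();
    t2.join();
    return 0;
}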
Example #14
File: main.cpp  Project: CCJY/coliru
void timer_thread()
{
    while (!isCancelled) {
        std::unique_lock<std::mutex> lock(m);
        std::chrono::system_clock::time_point now;
        while (keys.empty() || earliest > (now = std::chrono::system_clock::now())) {
            if (keys.empty()) {
                cv.wait(lock);
            } else {
                cv.wait_until(lock, earliest);
            }

            if (isCancelled)
                return;
        }

        std::vector<int> expired = remove_expired(now);
        earliest = find_next_timeout();
        lock.unlock();
        std::for_each(expired.begin(), expired.end(), callback);
    }
}
Example #15
int main() {
    /* thread */
    std::thread t1(factorial, 6);
    std::this_thread::sleep_for(chrono::milliseconds(3));
    chrono::steady_clock::time_point tp = chrono::steady_clock::now() + chrono::microseconds(4);
    std::this_thread::sleep_until(tp);

    /* Mutex */
    std::mutex mu;
    std::lock_guard<mutex> locker(mu);
    std::unique_lock<mutex> ulocker(mu);              // note: locking mu twice in one thread deadlocks
    ulocker.try_lock();
    ulocker.try_lock_for(chrono::nanoseconds(500));   // needs a timed mutex (std::timed_mutex)
    ulocker.try_lock_until(tp);                       // needs a timed mutex (std::timed_mutex)

    /* Condition Variable */
    std::condition_variable cond;
    cond.wait_for(ulocker, chrono::microseconds(2));
    cond.wait_until(ulocker, tp);

    /* Future and Promise */
    std::promise<int> p; 
    std::future<int> f = p.get_future();
    f.get();
    f.wait();
    f.wait_for(chrono::milliseconds(2));
    f.wait_until(tp);

    /* async() */
    std::future<int> fu = async(factorial, 6);

    /* Packaged Task */
    std::packaged_task<int(int)> t(factorial);
    std::future<int> fu2 = t.get_future();
    t(6);

    return 0;
}
Example #16
void f()
{
    std::unique_lock<std::mutex> lk(mut);
    assert(test2 == 0);
    test1 = 1;
    cv.notify_one();
    Clock::time_point t0 = Clock::now();
    Clock::time_point t = t0 + Clock::duration(250);
    while (test2 == 0 && cv.wait_until(lk, t) == std::cv_status::no_timeout)
        ;
    Clock::time_point t1 = Clock::now();
    if (runs == 0)
    {
        assert(t1 - t0 < Clock::duration(250));
        assert(test2 != 0);
    }
    else
    {
        assert(t1 - t0 - Clock::duration(250) < Clock::duration(5));
        assert(test2 == 0);
    }
    ++runs;
}
Example #17
	virtual void workerfun() {
		unique_lock<mutex> lock(m);
		while(running){
			cv.wait(lock, [this]{return !workqueue.empty() || !delayworkqueue.empty() || !running;});

			if(!workqueue.empty()){
				shared_ptr<RunItem> runItem(workqueue.front());
				workqueue.pop_front();
				lock.unlock();
				if(!runItem->mCanceled) {
					(*runItem.get())();
				}
				lock.lock();
			}
			if(!delayworkqueue.empty()){
				shared_ptr<RunItem> runItem = delayworkqueue.top();
				const chrono::time_point<chrono::steady_clock> runAt = runItem->runAt();
				if(chrono::steady_clock::now() >= runAt){
					delayworkqueue.pop();
					lock.unlock();
					if(!runItem->mCanceled){
						(*runItem.get())();
					}
					lock.lock();

					if(runItem->mRepeat && !runItem->mCanceled){
						runItem->mCreateTime = chrono::steady_clock::now();
						delayworkqueue.push(runItem);
					}

				} else if(workqueue.empty()) {
					cv.wait_until(lock, runAt);
				}
			}
		}
	}
Example #18
BatteryOutput TestRunner::executeBinary(Logger * logger,
                                        const std::string & objectInfo,
                                        const std::string & binaryPath,
                                        const std::string & arguments,
                                        const std::string & input) {
    int stdin_pipe[2];
    int stdout_pipe[2];
    int stderr_pipe[2];

    pid_t pid = 0;
    posix_spawn_file_actions_t actions;

    if(pipe(stdin_pipe) || pipe(stdout_pipe) || pipe(stderr_pipe)) {
        logger->warn(objectInfo + ": pipe creation failed. Test won't be executed.");
        {
            /* Remove thread from list of threads without child, notify and end */
            std::lock_guard<std::mutex> l (withoutChild_mux);
            --withoutChild;
        }
        withoutChild_cv.notify_one();
        return {};
    }

    /* Pipes will be mapped to I/O after process start */
    /* Unused ends are closed */
    posix_spawn_file_actions_init(&actions);
    /* Standard input */
    posix_spawn_file_actions_addclose(&actions , stdin_pipe[1]);
    posix_spawn_file_actions_adddup2(&actions , stdin_pipe[0] , 0);
    posix_spawn_file_actions_addclose(&actions , stdin_pipe[0]);
    /* Standard output */
    posix_spawn_file_actions_addclose(&actions , stdout_pipe[0]);
    posix_spawn_file_actions_adddup2(&actions , stdout_pipe[1] , 1);
    posix_spawn_file_actions_addclose(&actions , stdout_pipe[1]);
    /* Standard error output */
    posix_spawn_file_actions_addclose(&actions , stderr_pipe[0]);
    posix_spawn_file_actions_adddup2(&actions , stderr_pipe[1] , 2);
    posix_spawn_file_actions_addclose(&actions , stderr_pipe[1]);

    int argc = 0;
    char ** args = buildArgv(arguments , &argc);

    /* Creating locks but not locking them yet */
    std::unique_lock<std::mutex> finishedPid_lock(finishedPid_mux , std::defer_lock);
    std::unique_lock<std::mutex> withoutChild_lock(withoutChild_mux , std::defer_lock);
    std::unique_lock<std::mutex> waitingForChild_lock(waitingForChild_mux , std::defer_lock);
    std::unique_lock<std::mutex> childReceived_lock(childReceived_mux , std::defer_lock);
    std::unique_lock<std::mutex> threadsReady_lock(threadState_mux , std::defer_lock);

    /* Need both locks; if they cannot be acquired,
     * the main thread is waiting for a process to end */
    std::lock(finishedPid_lock , waitingForChild_lock);

    /* Starting child process of this thread */
    logger->info(objectInfo + ": spawning child process with arguments " + arguments);
    int status = posix_spawn(&pid , binaryPath.c_str() ,
                             &actions , NULL , args , NULL);

    if(status == 0) {
        /* Process was started without problems, proceed */
        logger->info(objectInfo + ": child process has pid " + Utils::itostr(pid));
        /* Closing pipes */
        close(stdin_pipe[0]);
        close(stdout_pipe[1]);
        close(stderr_pipe[1]);
        if(!input.empty())
            write(stdin_pipe[1] , input.c_str() , input.length());
        /* Start a thread for output reading. The thread will mostly sit in a blocked wait
         * and will finish essentially as soon as the process does.
         * This keeps the pipes drained so they won't fill up and block the underlying process. */
        BatteryOutput output;
        std::thread reader(readOutput , std::ref(output),
                           stdout_pipe , stderr_pipe);

        /* Incrementing the number of waiting threads.
         * Only when this variable equals withoutChild
         * can the main thread start reaping child processes. */
        ++waitingForChild;
        waitingForChild_lock.unlock();
        waitingForChild_cv.notify_one();

        /* Waiting for the PID. The PID is announced by the main thread, and this thread
         * will work only with its associated process
         * (because it owns the pipes to that process) */
        auto now = std::chrono::system_clock::now();
        /* Don't know why, but if I use TIMEOUT constant directly in argument it won't compile... */
        int vArgumenteToJebeChybu = PROCESS_TIMEOUT_SECONDS;
        if(!finishedPid_cv.wait_until(finishedPid_lock ,
                                      now + std::chrono::seconds(vArgumenteToJebeChybu) ,
                                      [&] { return finishedPid == pid; })) {
            /* If the thread reaches this branch, its process timed out.
             * Send it a kill signal; the main thread will then reap it.
             * That's why we go into the wait once more (usually it returns immediately) */
            logger->warn(objectInfo + ": child process with pid " + Utils::itostr(pid) + " timed out."
                         " Process will be killed now.");
            kill(pid , SIGKILL);
            finishedPid_cv.wait(finishedPid_lock , [&] { return finishedPid == pid; });
        }
        /* Obtaining all possible mutexes, decrementing variables that
         * track waiting and active threads, setting childReceived to true
         * so main thread will be notified */
        std::lock(withoutChild_lock , waitingForChild_lock , childReceived_lock);
        logger->info(objectInfo + ": child process with pid " + Utils::itostr(pid) + " finished");
        childReceived = true;
        --waitingForChild;
        --withoutChild;
        /* Unlock mutexes, this thread no longer needs them */
        finishedPid_lock.unlock();
        waitingForChild_lock.unlock();
        withoutChild_lock.unlock();
        childReceived_lock.unlock();
        /* Notify the thread creator that the number of active threads has changed.
         * It will spawn new threads if needed. */
        withoutChild_cv.notify_one();

        /* This thread needs to wait for notification from the thread maker
         * that it is OK to continue; after that, the main thread can
         * be notified that the child process was received and is being
         * processed. */
        threadsReady_lock.lock();
        threadState_cv.wait(threadsReady_lock ,
                            []{ return (threadState == THREAD_STATE_DONE) ||
                                       (threadState == THREAD_STATE_READY); });
        if(threadState == THREAD_STATE_READY) {
            /* Flip threadState so next thread must wait too. */
            threadState = THREAD_STATE_PENDING;
        }
        threadsReady_lock.unlock();

        /* Notify main thread that it can continue in reaping processes */
        childReceived_cv.notify_one();
        /* This thread has now completed all communication with other threads;
         * all that is left is to wait for the reader to finish. */
        reader.join();

        return output;
    } else {
        /* Some nasty error happened at execution. Report and end thread */
        logger->warn(objectInfo + ": can't execute child process. "
                     "This never happened during development.");
        {
            /* Remove thread from list of threads without child, notify and end */
            std::lock_guard<std::mutex> l (withoutChild_mux);
            --withoutChild;
        }
        withoutChild_cv.notify_one();
        return {};
    }
    /* All locks go out of scope, so everything will be unlocked here */
}