Example No. 1
void
task_set::wait_for_task(size_t taskindex, bool block)
{
    DASSERT(submitter() == std::this_thread::get_id());
    if (taskindex >= m_futures.size())
        return;  // nothing to wait for
    auto& f(m_futures[taskindex]);
    if (block || m_pool->is_worker(m_submitter_thread)) {
        // Block on completion of the task and don't try to do any
        // of the work with the calling thread.
        f.wait();
        return;
    }
    // If we made it here, we want to allow the calling thread to help
    // do pool work if it's waiting around for a while.
    const std::chrono::milliseconds wait_time(0);
    int tries = 0;
    while (1) {
        // Asking future.wait_for for 0 time just checks the status.
        if (f.wait_for(wait_time) == std::future_status::ready)
            return;  // task has completed
        // We're still waiting for the task to complete. What next?
        if (++tries < 4) {  // First few times,
            pause(4);       //   just busy-wait, check status again
            continue;
        }
        // Since we're waiting, try to run a task ourselves to help
        // with the load. If none is available, just yield the CPU.
        if (!m_pool->run_one_task(m_submitter_thread)) {
            // We tried to do a task ourselves, but there weren't any
            // left, so just wait for the rest to finish.
            yield();
        }
    }
}
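
For context, wait_for_task() belongs to a task_set helper that stores the futures of tasks submitted to a thread pool. Below is a minimal usage sketch, assuming a task_set::push() that records a future and a pool push() that returns one; both signatures are inferred from the code above, not quoted from the codebase.

// Hypothetical usage sketch; member signatures are assumptions.
void example(thread_pool* pool)
{
    task_set tasks(pool);  // remembers the submitting thread
    for (int i = 0; i < 8; ++i) {
        // Assume pool->push() schedules the lambda and returns the
        // std::future<void> that task_set keeps in m_futures.
        tasks.push(pool->push([](int /*threadid*/) { /* ... work ... */ }));
    }
    // block=false lets the caller steal pool work while it waits.
    tasks.wait_for_task(0, /*block=*/false);  // wait for task 0 only
    tasks.wait(false);                        // then wait for the rest
}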
Example No. 2
void
task_set::wait(bool block)
{
    DASSERT(submitter() == std::this_thread::get_id());
    const std::chrono::milliseconds wait_time(0);
    if (m_pool->is_worker(m_submitter_thread))
        block = true;  // don't get into recursive work stealing
    if (block == false) {
        int tries = 0;
        while (1) {
            bool all_finished = true;
            int nfutures = 0, finished = 0;  // tallies for debugging; not otherwise used
            for (auto&& f : m_futures) {
                // Asking future.wait_for for 0 time just checks the status.
                ++nfutures;
                auto status = f.wait_for(wait_time);
                if (status != std::future_status::ready)
                    all_finished = false;
                else
                    ++finished;
            }
            if (all_finished)  // All futures are ready? We're done.
                break;
            // We're still waiting on some tasks to complete. What next?
            if (++tries < 4) {  // First few times,
                pause(4);       //   just busy-wait, check status again
                continue;
            }
            // Since we're waiting, try to run a task ourselves to help
            // with the load. If none is available, just yield the CPU.
            if (!m_pool->run_one_task(m_submitter_thread)) {
                // We tried to do a task ourselves, but there weren't any
                // left, so just wait for the rest to finish.
#if 1
                yield();
#else
                // FIXME -- as currently written, if we see an empty queue
                // but are still waiting for the tasks in our set to end,
                // we will keep looping and potentially do work ourselves
                // that was part of another task set. Is there a benefit,
                // once we see an empty queue, to only waiting for the
                // existing tasks to finish and not altruistically
                // executing any more tasks? This is how we would take
                // the exit now:
                for (auto&& f : m_futures)
                    f.wait();
                break;
#endif
            }
        }
    } else {
        // If block is true, just block on completion of all the tasks
        // and don't try to do any of the work with the calling thread.
        for (auto&& f : m_futures)
            f.wait();
    }
#ifndef NDEBUG
    check_done();
#endif
}
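
Both methods share one pattern: poll each future with a zero-length wait_for(), busy-wait for the first few tries, then run queued pool work yourself instead of sleeping. The same pattern in a self-contained, standard-library-only sketch (the global queue and try_run_one() are illustrative stand-ins, not the actual pool internals):

#include <chrono>
#include <deque>
#include <future>
#include <mutex>
#include <thread>
#include <vector>

// Illustrative stand-in for the pool's work queue.
static std::mutex q_mutex;
static std::deque<std::packaged_task<void()>> q;

// Pop and run one queued task; return false if the queue was empty.
static bool try_run_one()
{
    std::packaged_task<void()> task;
    {
        std::lock_guard<std::mutex> lock(q_mutex);
        if (q.empty())
            return false;
        task = std::move(q.front());
        q.pop_front();
    }
    task();
    return true;
}

// Wait for all futures, helping with queued work instead of sleeping.
static void wait_helping(std::vector<std::future<void>>& futures)
{
    const std::chrono::milliseconds no_wait(0);
    int tries = 0;
    while (true) {
        bool all_ready = true;
        for (auto& f : futures)
            if (f.wait_for(no_wait) != std::future_status::ready)
                all_ready = false;
        if (all_ready)
            return;            // every task in the set has completed
        if (++tries < 4)
            continue;          // brief busy-wait, like pause(4) above
        if (!try_run_one())
            std::this_thread::yield();  // nothing to steal; yield the CPU
    }
}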
Example No. 3
int main(int argc,char **argv)
{
	std::string captured;
	try {
		cppcms::service srv(argc,argv);
		write_tests = srv.settings().get("test.write",false);
		booster::intrusive_ptr<cppcms::application> async = new async_test(srv);
		srv.applications_pool().mount( async, cppcms::mount_point("/async") );
		srv.applications_pool().mount( cppcms::applications_factory<sync_test>(), cppcms::mount_point("/sync"));
		srv.after_fork(submitter(srv));
		cppcms::copy_filter flt(std::cerr); // record the log
		srv.run();
		captured = flt.detach(); // get the log
	}
	catch(std::exception const &e) {
		std::cerr << e.what() << std::endl;
		return EXIT_FAILURE;
	}
	size_t pos = 0;
	while((pos=captured.find("Timeout on connection",pos))!=std::string::npos) {
		pos+=10; // advance past this match; no new match can start this soon
		count_timeouts++;
	}
	if(
		write_tests ?
		(
			async_bad_count != 2 
			|| sync_bad_count != 2 
			|| count_timeouts != 4
			|| above_15 != 2
			|| below_10 != 2
		)
		:
		(
			!eof_detected 
			|| count_timeouts != 5
		)
	  ) 
	{
		print_count_report(std::cerr);
		std::cerr << "Failed" << std::endl;
		return EXIT_FAILURE;
	}
	print_count_report(std::cout);
	if(!run_ok) {
		std::cerr << "Python script failed" << std::endl;
		return EXIT_FAILURE;
	}
	std::cout << "Ok" << std::endl;
	return EXIT_SUCCESS;
}
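
The scanning loop above counts occurrences of "Timeout on connection" by bumping the search position 10 characters past each hit. An equivalent self-contained helper that advances by the full needle length (count_occurrences is a hypothetical name, not part of the test):

#include <string>

// Count non-overlapping occurrences of `needle` in `haystack`.
size_t count_occurrences(const std::string &haystack, const std::string &needle)
{
	size_t count = 0;
	for (size_t pos = haystack.find(needle);
	     pos != std::string::npos;
	     pos = haystack.find(needle, pos + needle.size()))
		++count;
	return count;
}

With it, the counting loop reduces to count_timeouts = count_occurrences(captured, "Timeout on connection");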
Example No. 4
int main(int argc,char **argv)
{
	try {
		cppcms::service srv(argc,argv);
		srv.applications_pool().mount( cppcms::create_pool<unit_test>());
		srv.after_fork(submitter(srv));
		srv.run();
	}
	catch(std::exception const &e) {
		std::cerr << e.what() << std::endl;
		return EXIT_FAILURE;
	}
	return run_ok ? EXIT_SUCCESS : EXIT_FAILURE;
}
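
Each of these mains registers an after_fork callback built from submitter(srv), whose definition is not shown here. Judging from its use (Example 3 prints "Python script failed" when run_ok is false), a plausible sketch follows; the body, member names, and script name are all assumptions.

#include <cstdlib>
#include <thread>

// Hypothetical sketch of the submitter callable passed to after_fork().
struct submitter {
	submitter(cppcms::service &srv) : srv_(&srv) {}
	void operator()() const
	{
		cppcms::service *srv = srv_;
		// Drive the now-running server from a helper thread: run the
		// client script, record its result, then stop the service.
		std::thread([srv]() {
			run_ok = std::system("python client.py") == 0;  // assumed script
			srv->shutdown();
		}).detach();
	}
	cppcms::service *srv_;
};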
Example No. 5
int main(int argc,char **argv)
{
	try {
		cppcms::service srv(argc,argv);
		booster::intrusive_ptr<cppcms::application> async = new async_unit_test(srv);
		srv.applications_pool().mount( async, cppcms::mount_point("/async") );
		srv.applications_pool().mount( cppcms::applications_factory<unit_test>(), cppcms::mount_point("/sync"));
		srv.after_fork(submitter(srv));
		srv.run();
	}
	catch(std::exception const &e) {
		std::cerr << e.what() << std::endl;
		return EXIT_FAILURE;
	}
	if(bad_count != 3 || calls != 4) {
		std::cerr << "Failed bad_count = " << bad_count << " calls = " << calls << std::endl;
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}
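
Example 5 mounts a single asynchronous application instance (held by intrusive_ptr and shared across requests) alongside a per-request synchronous factory, then checks the calls and bad_count counters that the handlers maintain. A minimal sketch of such a counting handler; the class body is an assumption, and only the mounting calls above come from the test.

// Hypothetical counting application; the real unit_test and
// async_unit_test classes are defined elsewhere in the test suite.
class counting_test : public cppcms::application {
public:
	counting_test(cppcms::service &srv) : cppcms::application(srv) {}
	virtual void main(std::string path)
	{
		++calls;                      // global counter checked after run()
		if (!request_is_valid(path))  // assumed validation helper
			++bad_count;
		response().out() << "ok";
	}
};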