/// Enqueue a background request (with optional completion) and possibly spawn a
/// worker thread to service it. Must be called on the main thread.
/// Returns the thread count observed while the spawn lock was held.
int iothread_perform_impl(void_function_t &&func, void_function_t &&completion) {
    ASSERT_IS_MAIN_THREAD();
    ASSERT_IS_NOT_FORKED_CHILD();
    iothread_init();
    struct spawn_request_t req(std::move(func), std::move(completion));
    int thread_count_snapshot = -1;
    bool should_spawn = false;
    {
        // Limit the scope of the lock to this region.
        auto &&locker = s_spawn_requests.acquire();
        thread_data_t &td = locker.value;
        td.request_queue.push(std::move(req));
        // Only spawn if we are under the thread cap; the decision and the
        // counter bump must happen together, under the lock.
        if (td.thread_count < IO_MAX_THREADS) {
            td.thread_count++;
            should_spawn = true;
        }
        thread_count_snapshot = td.thread_count;
    }
    // Spawn outside the lock if we committed to a new thread above.
    if (should_spawn) {
        iothread_spawn();
    }
    return thread_count_snapshot;
}
/// The function that does thread work. static void *iothread_worker(void *unused) { UNUSED(unused); struct spawn_request_t req; while (dequeue_spawn_request(&req)) { debug(5, "pthread %p dequeued", this_thread()); // Perform the work req.handler(); // If there's a completion handler, we have to enqueue it on the result queue. // Note we're using std::function's weirdo operator== here if (req.completion != nullptr) { // Enqueue the result, and tell the main thread about it. enqueue_thread_result(std::move(req)); const char wakeup_byte = IO_SERVICE_RESULT_QUEUE; assert_with_errno(write_loop(s_write_pipe, &wakeup_byte, sizeof wakeup_byte) != -1); } } // We believe we have exhausted the thread request queue. We want to decrement // thread_count and exit. But it's possible that a request just came in. Furthermore, // it's possible that the main thread saw that thread_count is full, and decided to not // spawn a new thread, trusting in one of the existing threads to handle it. But we've already // committed to not handling anything else. Therefore, we have to decrement // the thread count under the lock, which we still hold. Likewise, the main thread must // check the value under the lock. int new_thread_count = --s_spawn_requests.acquire().value.thread_count; assert(new_thread_count >= 0); debug(5, "pthread %p exiting", this_thread()); // We're done. return NULL; }
/// Attempt to pop the next pending spawn request under the lock.
/// On success, moves it into *result and returns true; returns false if the
/// queue is empty.
static bool dequeue_spawn_request(spawn_request_t *result) {
    auto &&locker = s_spawn_requests.acquire();
    thread_data_t &td = locker.value;
    // Guard clause: nothing to hand out.
    if (td.request_queue.empty()) {
        return false;
    }
    *result = std::move(td.request_queue.front());
    td.request_queue.pop();
    return true;
}
/// Note that this function is quite sketchy. In particular, it drains threads, not requests, /// meaning that it may leave requests on the queue. This is the desired behavior (it may be called /// before fork, and we don't want to bother servicing requests before we fork), but in the test /// suite we depend on it draining all requests. In practice, this works, because a thread in /// practice won't exit while there is outstanding requests. /// /// At the moment, this function is only used in the test suite and in a /// drain-all-threads-before-fork compatibility mode that no architecture requires, so it's OK that /// it's terrible. void iothread_drain_all(void) { ASSERT_IS_MAIN_THREAD(); ASSERT_IS_NOT_FORKED_CHILD(); #define TIME_DRAIN 0 #if TIME_DRAIN int thread_count = s_spawn_requests.acquire().value.thread_count; double now = timef(); #endif // Nasty polling via select(). while (s_spawn_requests.acquire().value.thread_count > 0) { if (iothread_wait_for_pending_completions(1000)) { iothread_service_completion(); } } #if TIME_DRAIN double after = timef(); fwprintf(stdout, L"(Waited %.02f msec for %d thread(s) to drain)\n", 1000 * (after - now), thread_count); #endif }
// Service the queue of results static void iothread_service_result_queue() { // Move the queue to a local variable. std::queue<spawn_request_t> result_queue; s_result_queue.acquire().value.swap(result_queue); // Perform each completion in order while (!result_queue.empty()) { spawn_request_t req(std::move(result_queue.front())); result_queue.pop(); // ensure we don't invoke empty functions, that raises an exception if (req.completion != nullptr) { req.completion(); } } }
/// Return a job ID to the free pool. The slot must currently be marked consumed.
/// After clearing it, trailing unused slots are trimmed from the vector so it
/// only extends as far as the highest consumed ID.
void release_job_id(job_id_t jid) {
    assert(jid > 0);
    auto &&locker = locked_consumed_job_ids.acquire();
    std::vector<bool> &consumed_job_ids = locker.value;
    // Job ID N lives in slot N-1.
    size_t slot = (size_t)(jid - 1), count = consumed_job_ids.size();
    // The slot must exist and must be marked consumed.
    assert(slot < count);
    assert(consumed_job_ids.at(slot) == true);
    consumed_job_ids.at(slot) = false;
    // Scan downward for the highest still-consumed slot; the vector keeps that
    // slot and everything below it. If none is consumed, shrink to empty.
    size_t new_size = 0;
    for (size_t idx = count; idx-- > 0;) {
        if (consumed_job_ids.at(idx)) {
            new_size = idx + 1;
            break;
        }
    }
    consumed_job_ids.resize(new_size);
}
/// Allocate the lowest available job ID. Slot i of the consumed vector
/// corresponds to job ID i+1; reuses a freed slot when one exists, otherwise
/// grows the vector by one.
job_id_t acquire_job_id() {
    auto &&locker = locked_consumed_job_ids.acquire();
    std::vector<bool> &consumed_job_ids = locker.value;
    const auto first = consumed_job_ids.begin();
    const auto last = consumed_job_ids.end();
    // Look for a freed (false) slot to reuse.
    auto free_slot = std::find(first, last, false);
    if (free_slot != last) {
        *free_slot = true;
        // Slot 0 corresponds to job ID 1.
        return (job_id_t)(free_slot - first + 1);
    }
    // All slots in use: append a new consumed slot. The new size is exactly the
    // new job ID, since IDs are one larger than their slot index.
    consumed_job_ids.push_back(true);
    return (job_id_t)consumed_job_ids.size();
}
/// Translate a wide string via gettext, caching results in wgettext_map.
/// The returned reference points into the map, which only ever grows, so it
/// stays valid after the lock is released.
const wcstring &wgettext(const wchar_t *in) {
    // This is often called while printing error messages, so errno must survive it.
    const int saved_errno = errno;
    wcstring key = in;
    wgettext_init_if_necessary();
    auto wmap = wgettext_map.acquire();
    wcstring &val = wmap.value[key];
    // An empty cached value means we haven't translated this key yet.
    if (val.empty()) {
        cstring mbs_in = wcs2string(key);
        char *out = fish_gettext(mbs_in.c_str());
        val = format_string(L"%s", out);
    }
    errno = saved_errno;
    // The returned string is stored in the map.
    // TODO: If we want to shrink the map, this would be a problem.
    return val;
}
/// Push a finished request (carrying its completion) onto the shared result
/// queue under the lock; the main thread drains it later.
static void enqueue_thread_result(spawn_request_t req) {
    auto &&locker = s_result_queue.acquire();
    locker.value.push(std::move(req));
}