/// Wide-character wrapper around open(2) that does NOT mark the new fd
/// close-on-exec, so the fd is inherited by child processes. Main-thread
/// only; off the main thread, always use wopen_cloexec.
int wopen(const wcstring &pathname, int flags, mode_t mode) {
    ASSERT_IS_MAIN_THREAD();
    ASSERT_IS_NOT_FORKED_CHILD();
    // cloexec=false: callers rely on this fd surviving exec.
    return wopen_internal(pathname, flags, mode, false);
}
/// Enqueue `func` to run on a background I/O thread, with `completion` to be
/// invoked afterwards. If the worker pool is below IO_MAX_THREADS, a new
/// thread is spawned to service the queue. Returns the resulting thread count
/// (informational only). Main-thread only.
int iothread_perform_impl(void_function_t &&func, void_function_t &&completion) {
    ASSERT_IS_MAIN_THREAD();
    ASSERT_IS_NOT_FORKED_CHILD();
    iothread_init();

    struct spawn_request_t request(std::move(func), std::move(completion));
    bool should_spawn = false;
    int thread_count_snapshot = -1;
    {
        // All queue and thread-count bookkeeping happens under the lock.
        auto &&locker = s_spawn_requests.acquire();
        thread_data_t &data = locker.value;
        data.request_queue.push(std::move(request));
        if (data.thread_count < IO_MAX_THREADS) {
            data.thread_count++;
            should_spawn = true;
        }
        thread_count_snapshot = data.thread_count;
    }

    // Spawn outside the lock if we claimed a slot above.
    if (should_spawn) {
        iothread_spawn();
    }
    return thread_count_snapshot;
}
/// Note that this function is quite sketchy. In particular, it drains threads, not requests, /// meaning that it may leave requests on the queue. This is the desired behavior (it may be called /// before fork, and we don't want to bother servicing requests before we fork), but in the test /// suite we depend on it draining all requests. In practice, this works, because a thread in /// practice won't exit while there is outstanding requests. /// /// At the moment, this function is only used in the test suite and in a /// drain-all-threads-before-fork compatibility mode that no architecture requires, so it's OK that /// it's terrible. void iothread_drain_all(void) { ASSERT_IS_MAIN_THREAD(); ASSERT_IS_NOT_FORKED_CHILD(); scoped_lock locker(s_spawn_queue_lock); #define TIME_DRAIN 0 #if TIME_DRAIN int thread_count = s_active_thread_count; double now = timef(); #endif // Nasty polling via select(). while (s_active_thread_count > 0) { locker.unlock(); if (iothread_wait_for_pending_completions(1000)) { iothread_service_completion(); } locker.lock(); } #if TIME_DRAIN double after = timef(); printf("(Waited %.02f msec for %d thread(s) to drain)\n", 1000 * (after - now), thread_count); #endif }
int iothread_perform_base(int (*handler)(void *), void (*completionCallback)(void *, int), void *context) { ASSERT_IS_MAIN_THREAD(); ASSERT_IS_NOT_FORKED_CHILD(); iothread_init(); // Create and initialize a request. struct SpawnRequest_t *req = new SpawnRequest_t(); req->handler = handler; req->completionCallback = completionCallback; req->context = context; int local_thread_count = -1; bool spawn_new_thread = false; { // Lock around a local region. Note that we can only access s_active_thread_count under the // lock. scoped_lock lock(s_spawn_queue_lock); add_to_queue(req); if (s_active_thread_count < IO_MAX_THREADS) { s_active_thread_count++; spawn_new_thread = true; } local_thread_count = s_active_thread_count; } // Kick off the thread if we decided to do so. if (spawn_new_thread) { iothread_spawn(); } // We return the active thread count for informational purposes only. return local_thread_count; }
/// Return the "principal" parser singleton. The instance is a function-local
/// static; its address is recorded in s_principal_parser on first use so
/// other code can identify it. Main-thread only.
parser_t &parser_t::principal_parser(void) {
    ASSERT_IS_NOT_FORKED_CHILD();
    ASSERT_IS_MAIN_THREAD();
    static parser_t parser;
    if (s_principal_parser == nullptr) {
        s_principal_parser = &parser;
    }
    return parser;
}
/// Return the "principal" parser singleton, constructed as a general-purpose
/// parser. Its address is recorded in s_principal_parser the first time
/// through so other code can identify it. Main-thread only.
parser_t &parser_t::principal_parser(void) {
    ASSERT_IS_NOT_FORKED_CHILD();
    ASSERT_IS_MAIN_THREAD();
    static parser_t parser(PARSER_TYPE_GENERAL, true);
    if (s_principal_parser == NULL) {
        s_principal_parser = &parser;
    }
    return parser;
}
/// This function is similar to launch_process, except it is not called after a fork (i.e. it only /// calls exec) and therefore it can allocate memory. static void launch_process_nofork(process_t *p) { ASSERT_IS_MAIN_THREAD(); ASSERT_IS_NOT_FORKED_CHILD(); null_terminated_array_t<char> argv_array; convert_wide_array_to_narrow(p->get_argv_array(), &argv_array); const char *const *envv = env_export_arr(); char *actual_cmd = wcs2str(p->actual_cmd); // Ensure the terminal modes are what they were before we changed them. restore_term_mode(); // Bounce to launch_process. This never returns. safe_launch_process(p, actual_cmd, argv_array.get(), envv); }
static int wopen_internal(const wcstring &pathname, int flags, mode_t mode, bool cloexec) { ASSERT_IS_NOT_FORKED_CHILD(); cstring tmp = wcs2string(pathname); /* Prefer to use O_CLOEXEC. It has to both be defined and nonzero. */ #ifdef O_CLOEXEC if (cloexec && (O_CLOEXEC != 0)) { flags |= O_CLOEXEC; cloexec = false; } #endif int fd = ::open(tmp.c_str(), flags, mode); if (cloexec && fd >= 0 && ! set_cloexec(fd)) { close(fd); fd = -1; } return fd; }
static int wopen_internal(const wcstring &pathname, int flags, mode_t mode, bool cloexec) { ASSERT_IS_NOT_FORKED_CHILD(); cstring tmp = wcs2string(pathname); int fd; #ifdef O_CLOEXEC // Prefer to use O_CLOEXEC. It has to both be defined and nonzero. if (cloexec) { fd = open(tmp.c_str(), flags | O_CLOEXEC, mode); } else { fd = open(tmp.c_str(), flags, mode); } #else fd = open(tmp.c_str(), flags, mode); if (fd >= 0 && !set_cloexec(fd)) { close(fd); fd = -1; } #endif return fd; }
/// Note that this function is quite sketchy. In particular, it drains threads, not requests, /// meaning that it may leave requests on the queue. This is the desired behavior (it may be called /// before fork, and we don't want to bother servicing requests before we fork), but in the test /// suite we depend on it draining all requests. In practice, this works, because a thread in /// practice won't exit while there is outstanding requests. /// /// At the moment, this function is only used in the test suite and in a /// drain-all-threads-before-fork compatibility mode that no architecture requires, so it's OK that /// it's terrible. void iothread_drain_all(void) { ASSERT_IS_MAIN_THREAD(); ASSERT_IS_NOT_FORKED_CHILD(); #define TIME_DRAIN 0 #if TIME_DRAIN int thread_count = s_spawn_requests.acquire().value.thread_count; double now = timef(); #endif // Nasty polling via select(). while (s_spawn_requests.acquire().value.thread_count > 0) { if (iothread_wait_for_pending_completions(1000)) { iothread_service_completion(); } } #if TIME_DRAIN double after = timef(); fwprintf(stdout, L"(Waited %.02f msec for %d thread(s) to drain)\n", 1000 * (after - now), thread_count); #endif }
/// Return a reference to the principal parser. Main-thread only; must not be
/// called from a forked child.
parser_t &parser_t::principal_parser() {
    ASSERT_IS_NOT_FORKED_CHILD();
    ASSERT_IS_MAIN_THREAD();
    return s_principal_parser;
}
/// Walk an io_chain_t and flatten it into a concrete sequence of dup2/close
/// actions, opening any files that redirections name. Returns none() if a
/// file cannot be opened or an fd cannot be relocated; fds opened here are
/// recorded in opened_fds_ so they are auto-closed with the result.
maybe_t<dup2_list_t> dup2_list_t::resolve_chain(const io_chain_t &io_chain) {
    ASSERT_IS_NOT_FORKED_CHILD();
    dup2_list_t result;
    for (const auto &io_ref : io_chain) {
        switch (io_ref->io_mode) {
            case io_mode_t::file: {
                // Here we definitely do not want to set CLO_EXEC because our child needs access.
                // Open the file.
                const io_file_t *io_file = static_cast<const io_file_t *>(io_ref.get());
                int file_fd = open(io_file->filename_cstr, io_file->flags, OPEN_MASK);
                if (file_fd < 0) {
                    // Distinguish noclobber failures (O_EXCL + EEXIST) from
                    // generic open errors for a clearer message.
                    if ((io_file->flags & O_EXCL) && (errno == EEXIST)) {
                        debug(1, NOCLOB_ERROR, io_file->filename_cstr);
                    } else {
                        debug(1, FILE_ERROR, io_file->filename_cstr);
                        if (should_debug(1)) wperror(L"open");
                    }
                    return none();
                }
                // If by chance we got the file we want, we're done. Otherwise move the fd to an
                // unused place and dup2 it.
                // Note move_fd_to_unused() will close the incoming file_fd.
                if (file_fd != io_file->fd) {
                    file_fd = move_fd_to_unused(file_fd, io_chain, false /* cloexec */);
                    if (file_fd < 0) {
                        debug(1, FILE_ERROR, io_file->filename_cstr);
                        if (should_debug(1)) wperror(L"dup");
                        return none();
                    }
                }
                // Record that we opened this file, so we will auto-close it.
                assert(file_fd >= 0 && "Should have a valid file_fd");
                result.opened_fds_.emplace_back(file_fd);
                // Mark our dup2 and our close actions.
                result.add_dup2(file_fd, io_file->fd);
                result.add_close(file_fd);
                break;
            }
            case io_mode_t::close: {
                // Explicit fd close requested by the redirection.
                const io_close_t *io = static_cast<const io_close_t *>(io_ref.get());
                result.add_close(io->fd);
                break;
            }
            case io_mode_t::fd: {
                // Plain fd duplication (e.g. 2>&1).
                const io_fd_t *io = static_cast<const io_fd_t *>(io_ref.get());
                result.add_dup2(io->old_fd, io->fd);
                break;
            }
            case io_mode_t::pipe: {
                // Dup the pipe end onto the target fd, then close the original
                // pipe fd so only the target remains.
                const io_pipe_t *io = static_cast<const io_pipe_t *>(io_ref.get());
                result.add_dup2(io->pipe_fd(), io->fd);
                result.add_close(io->pipe_fd());
                break;
            }
            case io_mode_t::bufferfill: {
                // Same dup-then-close pattern for the buffer's write end.
                const io_bufferfill_t *io = static_cast<const io_bufferfill_t *>(io_ref.get());
                result.add_dup2(io->write_fd(), io->fd);
                result.add_close(io->write_fd());
                break;
            }
        }
    }
    return {std::move(result)};
}