/*
 * Briefly undo skip-thread so a pending thread interruption can be
 * serviced, then hide the thread from the GC again. Must only be
 * called while the calling thread is inside a skip-thread region.
 */
void
check_for_interruption_critical (void)
{
	MonoInternalThread *thread;

	/* RULE NUMBER ONE OF SKIP_THREAD: NEVER POKE MANAGED STATE. */
	mono_gc_set_skip_thread (FALSE);

	thread = mono_thread_internal_current ();
	if (THREAD_WANTS_A_BREAK (thread))
		mono_thread_interruption_checkpoint ();

	/* RULE NUMBER TWO OF SKIP_THREAD: READ RULE NUMBER ONE. */
	mono_gc_set_skip_thread (TRUE);
}
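/*
 * Illustrative sketch, not part of the runtime: the call pattern the two
 * rules above imply. The thread hides from the GC only while parked in the
 * kernel and touches no managed state there; an interruption signalled via
 * EINTR is serviced through check_for_interruption_critical (), which makes
 * the thread visible again before poking thread state. The function name
 * and the read () target are hypothetical.
 */
static void
skip_thread_pattern_example (int fd)
{
	char buf [1];
	ssize_t nread = 0;

	mono_gc_set_skip_thread (TRUE);
	do {
		if (nread == -1)
			check_for_interruption_critical (); /* safe inside the skip region */
		nread = read (fd, buf, sizeof (buf)); /* blocking call, no managed state */
	} while (nread == -1 && errno == EINTR);
	mono_gc_set_skip_thread (FALSE);
	/* From here on the GC sees the thread again and managed state is fair game. */
}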
static gint
epoll_event_wait (void (*callback) (gint fd, gint events, gpointer user_data), gpointer user_data)
{
	gint i, ready;

	memset (epoll_events, 0, sizeof (struct epoll_event) * EPOLL_NEVENTS);

	mono_gc_set_skip_thread (TRUE);
	MONO_ENTER_GC_SAFE;
	ready = epoll_wait (epoll_fd, epoll_events, EPOLL_NEVENTS, -1);
	MONO_EXIT_GC_SAFE;
	mono_gc_set_skip_thread (FALSE);

	if (ready == -1) {
		switch (errno) {
		case EINTR:
			mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
			ready = 0;
			break;
		default:
			g_error ("epoll_event_wait: epoll_wait () failed, error (%d) %s", errno, g_strerror (errno));
			break;
		}
	}

	if (ready == -1)
		return -1;

	for (i = 0; i < ready; ++i) {
		gint fd, events = 0;

		fd = epoll_events [i].data.fd;
		if (epoll_events [i].events & (EPOLLIN | EPOLLERR | EPOLLHUP))
			events |= EVENT_IN;
		if (epoll_events [i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP))
			events |= EVENT_OUT;

		callback (fd, events, user_data);
	}

	return 0;
}
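/*
 * Hypothetical usage sketch of the callback contract shared by
 * epoll_event_wait (), kqueue_event_wait () and poll_event_wait (): each
 * backend normalizes OS-specific readiness into EVENT_IN/EVENT_OUT (plus
 * EVENT_ERR in the poll backend) and invokes the callback once per ready
 * descriptor. The callback name and body below are illustrative only.
 */
static void
example_event_callback (gint fd, gint events, gpointer user_data)
{
	if (events & EVENT_IN)
		g_message ("fd %d is readable", fd);
	if (events & EVENT_OUT)
		g_message ("fd %d is writable", fd);
}

/* A wait loop would then be: while (epoll_event_wait (example_event_callback, NULL) == 0); */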
static gint
kqueue_event_wait (void (*callback) (gint fd, gint events, gpointer user_data), gpointer user_data)
{
	gint i, ready;

	memset (kqueue_events, 0, sizeof (struct kevent) * KQUEUE_NEVENTS);

	mono_gc_set_skip_thread (TRUE);
	/* Enter GC-safe mode around the blocking kevent () call, as the epoll backend does. */
	MONO_ENTER_GC_SAFE;
	ready = kevent (kqueue_fd, NULL, 0, kqueue_events, KQUEUE_NEVENTS, NULL);
	MONO_EXIT_GC_SAFE;
	mono_gc_set_skip_thread (FALSE);

	if (ready == -1) {
		switch (errno) {
		case EINTR:
			mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
			ready = 0;
			break;
		default:
			g_error ("kqueue_event_wait: kevent () failed, error (%d) %s", errno, g_strerror (errno));
			break;
		}
	}

	if (ready == -1)
		return -1;

	for (i = 0; i < ready; ++i) {
		gint fd, events = 0;

		fd = kqueue_events [i].ident;
		if (kqueue_events [i].filter == EVFILT_READ || (kqueue_events [i].flags & EV_ERROR) != 0)
			events |= EVENT_IN;
		if (kqueue_events [i].filter == EVFILT_WRITE || (kqueue_events [i].flags & EV_ERROR) != 0)
			events |= EVENT_OUT;

		callback (fd, events, user_data);
	}

	return 0;
}
static gint
poll_event_wait (void (*callback) (gint fd, gint events, gpointer user_data), gpointer user_data)
{
	gint i, ready;

	for (i = 0; i < poll_fds_size; ++i)
		poll_fds [i].revents = 0;

	mono_gc_set_skip_thread (TRUE);
	MONO_ENTER_GC_SAFE;
	ready = mono_poll (poll_fds, poll_fds_size, -1);
	MONO_EXIT_GC_SAFE;
	mono_gc_set_skip_thread (FALSE);

	if (ready == -1) {
		/*
		 * Apart from EINTR, we only check EBADF. For the rest:
		 *
		 * EINVAL: mono_poll() 'protects' us from descriptor numbers
		 * above the limit if using select(), by marking them as
		 * POLLERR. If a system poll() is being used, the number of
		 * descriptors we're passing will not be over
		 * sysconf(_SC_OPEN_MAX), as the error would have happened
		 * when opening.
		 *
		 * EFAULT: we own the memory pointed to by pfds.
		 * ENOMEM: we're doomed anyway.
		 */
#if !defined(HOST_WIN32)
		switch (errno)
#else
		switch (WSAGetLastError ())
#endif
		{
#if !defined(HOST_WIN32)
		case EINTR:
#else
		case WSAEINTR:
#endif
		{
			mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
			ready = 0;
			break;
		}
#if !defined(HOST_WIN32)
		case EBADF:
#else
		case WSAEBADF:
#endif
		{
			ready = poll_mark_bad_fds (poll_fds, poll_fds_size);
			break;
		}
		default:
#if !defined(HOST_WIN32)
			g_error ("poll_event_wait: mono_poll () failed, error (%d) %s", errno, g_strerror (errno));
#else
			g_error ("poll_event_wait: mono_poll () failed, error (%d)\n", WSAGetLastError ());
#endif
			break;
		}
	}

	if (ready == -1)
		return -1;
	if (ready == 0)
		return 0;

	g_assert (ready > 0);

	for (i = 0; i < poll_fds_size; ++i) {
		gint fd, events = 0;

		if (poll_fds [i].fd == -1)
			continue;
		if (poll_fds [i].revents == 0)
			continue;

		fd = poll_fds [i].fd;
		if (poll_fds [i].revents & (MONO_POLLIN | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL))
			events |= EVENT_IN;
		if (poll_fds [i].revents & (MONO_POLLOUT | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL))
			events |= EVENT_OUT;
		if (poll_fds [i].revents & (MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL))
			events |= EVENT_ERR;

		callback (fd, events, user_data);

		if (--ready == 0)
			break;
	}

	return 0;
}
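/*
 * poll_mark_bad_fds () itself is not shown in this excerpt. A plausible
 * shape (an assumption, mirroring the mark_bad_fds () helper used by
 * tp_poll_wait () below): probe each descriptor individually with a zero
 * timeout, so a stale fd that made the whole mono_poll () call fail with
 * EBADF is flagged as MONO_POLLNVAL and later dispatched to the callback
 * as an error. Sketch only; the name is suffixed to avoid clashing with
 * the real helper.
 */
static gint
poll_mark_bad_fds_sketch (mono_pollfd *pfds, gint nfds)
{
	gint i, ret, ready = 0;

	for (i = 0; i < nfds; i++) {
		if (pfds [i].fd == -1)
			continue;
		ret = mono_poll (&pfds [i], 1, 0); /* non-blocking probe of a single fd */
		if (ret == -1 && errno == EBADF) {
			pfds [i].revents |= MONO_POLLNVAL; /* surface the bad fd as an error event */
			ready++;
		} else if (ret == 1) {
			ready++;
		}
	}

	return ready;
}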
static void
tp_kqueue_wait (gpointer p)
{
	SocketIOData *socket_io_data;
	int kfd;
	struct kevent *events, *evt;
	int ready = 0, i;
	gpointer async_results [KQUEUE_NEVENTS * 2]; // * 2 because each loop iteration can add up to 2 results here
	gint nresults;
	tp_kqueue_data *data;

	socket_io_data = p;
	data = socket_io_data->event_data;
	kfd = data->fd;
	events = g_new0 (struct kevent, KQUEUE_NEVENTS);

	while (1) {
		mono_gc_set_skip_thread (TRUE);

		do {
			if (ready == -1) {
				/* check_for_interruption_critical () restores GC visibility
				 * before touching thread state (RULE NUMBER ONE OF SKIP_THREAD). */
				check_for_interruption_critical ();
			}
			ready = kevent (kfd, NULL, 0, events, KQUEUE_NEVENTS, NULL);
		} while (ready == -1 && errno == EINTR);

		mono_gc_set_skip_thread (FALSE);

		if (ready == -1) {
			int err = errno;
			g_free (events);
			if (err != EBADF)
				g_warning ("kevent wait: %d %s", err, g_strerror (err));
			return;
		}

		EnterCriticalSection (&socket_io_data->io_lock);
		if (socket_io_data->inited == 3) {
			g_free (events);
			LeaveCriticalSection (&socket_io_data->io_lock);
			return; /* cleanup called */
		}

		nresults = 0;
		for (i = 0; i < ready; i++) {
			int fd;
			MonoMList *list;
			MonoObject *ares;

			evt = &events [i];
			fd = evt->ident;
			list = mono_g_hash_table_lookup (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			if (list != NULL && (evt->filter == EVFILT_READ || (evt->flags & EV_ERROR) != 0)) {
				ares = get_io_event (&list, MONO_POLLIN);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}
			if (list != NULL && (evt->filter == EVFILT_WRITE || (evt->flags & EV_ERROR) != 0)) {
				ares = get_io_event (&list, MONO_POLLOUT);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}

			if (list != NULL) {
				int p;

				mono_g_hash_table_replace (socket_io_data->sock_to_state, GINT_TO_POINTER (fd), list);
				p = get_events_from_list (list);
				/* Events are registered EV_ONESHOT: re-add them while the list still wants them. */
				if (evt->filter == EVFILT_READ && (p & MONO_POLLIN) != 0) {
					EV_SET (evt, fd, EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, 0);
					kevent_change (kfd, evt, "READD read");
				}
				if (evt->filter == EVFILT_WRITE && (p & MONO_POLLOUT) != 0) {
					EV_SET (evt, fd, EVFILT_WRITE, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, 0);
					kevent_change (kfd, evt, "READD write");
				}
			} else {
				mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			}
		}
		LeaveCriticalSection (&socket_io_data->io_lock);
		threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
		/* Don't leave managed pointers behind in the stack buffer. */
		mono_gc_bzero (async_results, sizeof (gpointer) * nresults);
	}
}
static void
tp_epoll_wait (gpointer p)
{
	SocketIOData *socket_io_data;
	int epollfd;
	struct epoll_event *events, *evt;
	int ready = 0, i;
	gpointer async_results [EPOLL_NEVENTS * 2]; // * 2 because each loop iteration can add up to 2 results here
	gint nresults;
	tp_epoll_data *data;

	socket_io_data = p;
	data = socket_io_data->event_data;
	epollfd = data->epollfd;
	events = g_new0 (struct epoll_event, EPOLL_NEVENTS);

	while (1) {
		mono_gc_set_skip_thread (TRUE);

		do {
			if (ready == -1) {
				check_for_interruption_critical ();
			}
			ready = epoll_wait (epollfd, events, EPOLL_NEVENTS, -1);
		} while (ready == -1 && errno == EINTR);

		mono_gc_set_skip_thread (FALSE);

		if (ready == -1) {
			int err = errno;
			g_free (events);
			if (err != EBADF)
				g_warning ("epoll_wait: %d %s", err, g_strerror (err));
			return;
		}

		EnterCriticalSection (&socket_io_data->io_lock);
		if (socket_io_data->inited == 3) {
			g_free (events);
			LeaveCriticalSection (&socket_io_data->io_lock);
			return; /* cleanup called */
		}

		nresults = 0;
		for (i = 0; i < ready; i++) {
			int fd;
			MonoMList *list;
			MonoObject *ares;

			evt = &events [i];
			fd = evt->data.fd;
			list = mono_g_hash_table_lookup (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			if (list != NULL && (evt->events & (EPOLLIN | EPOLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLIN);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}
			if (list != NULL && (evt->events & (EPOLLOUT | EPOLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLOUT);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}

			if (list != NULL) {
				int p;

				mono_g_hash_table_replace (socket_io_data->sock_to_state, GINT_TO_POINTER (fd), list);
				p = get_events_from_list (list);
				evt->events = (p & MONO_POLLOUT) ? EPOLLOUT : 0;
				evt->events |= (p & MONO_POLLIN) ? EPOLLIN : 0;
				/* MOD first; fall back to ADD for descriptors epoll doesn't know yet. */
				if (epoll_ctl (epollfd, EPOLL_CTL_MOD, fd, evt) == -1) {
					if (epoll_ctl (epollfd, EPOLL_CTL_ADD, fd, evt) == -1) {
						int err = errno;
						g_message ("epoll(ADD): %d %s", err, g_strerror (err));
					}
				}
			} else {
				mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
				epoll_ctl (epollfd, EPOLL_CTL_DEL, fd, evt);
			}
		}
		LeaveCriticalSection (&socket_io_data->io_lock);
		threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
		/* Don't leave managed pointers behind in the stack buffer. */
		mono_gc_bzero (async_results, sizeof (gpointer) * nresults);
	}
}
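/*
 * The registration path is not shown in this excerpt. A minimal sketch of
 * how a descriptor could be armed for the loop above (hypothetical helper;
 * the real code derives the mask from the fd's interest list via
 * get_events_from_list ()):
 */
static void
epoll_arm_fd_sketch (int epollfd, int fd, int mono_events)
{
	struct epoll_event evt;

	memset (&evt, 0, sizeof (evt));
	evt.data.fd = fd;
	evt.events = (mono_events & MONO_POLLIN) ? EPOLLIN : 0;
	evt.events |= (mono_events & MONO_POLLOUT) ? EPOLLOUT : 0;

	/* MOD first, fall back to ADD for a descriptor epoll has not seen yet,
	 * mirroring the re-arm logic in tp_epoll_wait (). */
	if (epoll_ctl (epollfd, EPOLL_CTL_MOD, fd, &evt) == -1 &&
	    epoll_ctl (epollfd, EPOLL_CTL_ADD, fd, &evt) == -1)
		g_warning ("epoll_arm_fd_sketch: %d %s", errno, g_strerror (errno));
}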
static void
async_invoke_thread (gpointer data)
{
	MonoDomain *domain;
	MonoInternalThread *thread;
	MonoWSQ *wsq;
	ThreadPool *tp;
	gboolean must_die;
	const gchar *name;

	tp = data;
	wsq = NULL;
	if (!tp->is_io)
		wsq = add_wsq ();

	thread = mono_thread_internal_current ();

	mono_profiler_thread_start (thread->tid);
	name = (tp->is_io) ? "IO Threadpool worker" : "Threadpool worker";
	mono_thread_set_name_internal (thread, mono_string_new (mono_domain_get (), name), FALSE);

	if (tp_start_func)
		tp_start_func (tp_hooks_user_data);

	data = NULL;
	for (;;) {
		MonoAsyncResult *ar;
		MonoClass *klass;
		gboolean is_io_task;
		gboolean is_socket;
		int n_naps = 0;

		is_io_task = FALSE;
		is_socket = FALSE;
		ar = (MonoAsyncResult *) data;
		if (ar) {
			InterlockedIncrement (&tp->busy_threads);
			domain = ((MonoObject *)ar)->vtable->domain;
#ifndef DISABLE_SOCKETS
			klass = ((MonoObject *) data)->vtable->klass;
			is_io_task = !is_corlib_asyncresult (domain, klass);
			if (is_io_task) {
				MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;

				is_socket = is_socketasyncresult (domain, klass);
				ar = state->ares;
				switch (state->operation) {
				case AIO_OP_RECEIVE:
					state->total = ICALL_RECV (state);
					break;
				case AIO_OP_SEND:
					state->total = ICALL_SEND (state);
					break;
				}
			}
#endif
			/* worker threads invoke methods in different domains,
			 * so we need to set the right domain here */
			g_assert (domain);

			if (mono_domain_is_unloading (domain) || mono_runtime_is_shutting_down ()) {
				threadpool_jobs_dec ((MonoObject *)ar);
				data = NULL;
				ar = NULL;
				InterlockedDecrement (&tp->busy_threads);
			} else {
				mono_thread_push_appdomain_ref (domain);
				if (threadpool_jobs_dec ((MonoObject *)ar)) {
					data = NULL;
					ar = NULL;
					mono_thread_pop_appdomain_ref ();
					InterlockedDecrement (&tp->busy_threads);
					continue;
				}

				if (mono_domain_set (domain, FALSE)) {
					MonoObject *exc;

					if (tp_item_begin_func)
						tp_item_begin_func (tp_item_user_data);
					if (!is_io_task && ar->add_time > 0)
						process_idle_times (tp, ar->add_time);
					exc = mono_async_invoke (tp, ar);
					if (tp_item_end_func)
						tp_item_end_func (tp_item_user_data);
					if (exc)
						mono_internal_thread_unhandled_exception (exc);
					if (is_socket && tp->is_io) {
						MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;

						if (state->completed && state->callback) {
							MonoAsyncResult *cb_ares;

							cb_ares = create_simple_asyncresult ((MonoObject *) state->callback,
								(MonoObject *) state);
							icall_append_job ((MonoObject *) cb_ares);
						}
					}
					mono_domain_set (mono_get_root_domain (), TRUE);
				}
				mono_thread_pop_appdomain_ref ();
				InterlockedDecrement (&tp->busy_threads);
				/* If the callee changes the background status, set it back to TRUE */
				mono_thread_clr_state (thread, ~ThreadState_Background);
				if (!mono_thread_test_state (thread, ThreadState_Background))
					ves_icall_System_Threading_Thread_SetState (thread, ThreadState_Background);
			}
		}

		ar = NULL;
		data = NULL;
		must_die = should_i_die (tp);
		if (!must_die && (tp->is_io || !mono_wsq_local_pop (&data)))
			dequeue_or_steal (tp, &data, wsq);

		n_naps = 0;
		while (!must_die && !data && n_naps < 4) {
			gboolean res;

			InterlockedIncrement (&tp->waiting);

			/* Another thread may have added a job into its wsq since the last call to dequeue_or_steal.
			 * Check all the queues again before entering the wait loop. */
			dequeue_or_steal (tp, &data, wsq);
			if (data) {
				InterlockedDecrement (&tp->waiting);
				break;
			}

			mono_gc_set_skip_thread (TRUE);

#if defined(__OpenBSD__)
			while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_wait (&tp->new_job, TRUE)) == -1) {// && errno == EINTR) {
#else
			while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_timedwait (&tp->new_job, 2000, TRUE)) == -1) {// && errno == EINTR) {
#endif
				if (mono_runtime_is_shutting_down ())
					break;
				/* Go through check_for_interruption_critical () so the thread is
				 * visible to the GC again before any managed state is touched. */
				check_for_interruption_critical ();
			}
			InterlockedDecrement (&tp->waiting);

			mono_gc_set_skip_thread (FALSE);

			if (mono_runtime_is_shutting_down ())
				break;
			must_die = should_i_die (tp);
			dequeue_or_steal (tp, &data, wsq);
			n_naps++;
		}

		if (!data && !tp->is_io && !mono_runtime_is_shutting_down ()) {
			mono_wsq_local_pop (&data);
			if (data && must_die) {
				InterlockedCompareExchange (&tp->destroy_thread, 1, 0);
				pulse_on_new_job (tp);
			}
		}

		if (!data) {
			gint nt;
			gboolean down;

			while (1) {
				nt = tp->nthreads;
				down = mono_runtime_is_shutting_down ();
				if (!down && nt <= tp->min_threads)
					break;
				if (down || InterlockedCompareExchange (&tp->nthreads, nt - 1, nt) == nt) {
					mono_perfcounter_update_value (tp->pc_nthreads, TRUE, -1);
					if (!tp->is_io) {
						remove_wsq (wsq);
					}

					mono_profiler_thread_end (thread->tid);

					if (tp_finish_func)
						tp_finish_func (tp_hooks_user_data);
					return;
				}
			}
		}
	}

	g_assert_not_reached ();
}

void
ves_icall_System_Threading_ThreadPool_GetAvailableThreads (gint *workerThreads, gint *completionPortThreads)
{
	*workerThreads = async_tp.max_threads - async_tp.busy_threads;
	*completionPortThreads = async_io_tp.max_threads - async_io_tp.busy_threads;
}
static void
tp_poll_wait (gpointer p)
{
#if MONO_SMALL_CONFIG
#define INITIAL_POLLFD_SIZE	128
#else
#define INITIAL_POLLFD_SIZE	1024
#endif
#define POLL_ERRORS (MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL)

#ifdef DISABLE_SOCKETS
#define socket_io_cleanup(x)
#endif
	mono_pollfd *pfds;
	gint maxfd = 1;
	gint allocated;
	gint i;
	tp_poll_data *data;
	SocketIOData *socket_io_data = p;
	MonoPtrArray async_results;
	gint nresults;

	data = socket_io_data->event_data;
	allocated = INITIAL_POLLFD_SIZE;
	pfds = g_new0 (mono_pollfd, allocated);
	mono_ptr_array_init (async_results, allocated * 2);
	INIT_POLLFD (pfds, data->pipe [0], MONO_POLLIN);
	for (i = 1; i < allocated; i++)
		INIT_POLLFD (&pfds [i], -1, 0);

	while (1) {
		int nsock = 0;
		mono_pollfd *pfd;
		char one [1];
		MonoMList *list;
		MonoObject *ares;

		mono_gc_set_skip_thread (TRUE);

		do {
			if (nsock == -1) {
				/* check_for_interruption_critical () restores GC visibility
				 * before touching thread state (RULE NUMBER ONE OF SKIP_THREAD). */
				check_for_interruption_critical ();
			}
			nsock = mono_poll (pfds, maxfd, -1);
		} while (nsock == -1 && errno == EINTR);

		mono_gc_set_skip_thread (FALSE);

		/*
		 * Apart from EINTR, we only check EBADF. For the rest:
		 *
		 * EINVAL: mono_poll() 'protects' us from descriptor numbers
		 * above the limit if using select(), by marking them as
		 * MONO_POLLERR. If a system poll() is being used, the number
		 * of descriptors we're passing will not be over
		 * sysconf(_SC_OPEN_MAX), as the error would have happened
		 * when opening.
		 *
		 * EFAULT: we own the memory pointed to by pfds.
		 * ENOMEM: we're doomed anyway.
		 */
		if (nsock == -1 && errno == EBADF) {
			pfds->revents = 0; /* Just in case... */
			nsock = mark_bad_fds (pfds, maxfd);
		}

		if ((pfds->revents & POLL_ERRORS) != 0) {
			/* We're supposed to die now, as the pipe has been closed */
			g_free (pfds);
			mono_ptr_array_destroy (async_results);
			socket_io_cleanup (socket_io_data);
			return;
		}

		/* Got a new socket */
		if ((pfds->revents & MONO_POLLIN) != 0) {
			int nread;
			gboolean found = FALSE;

			for (i = 1; i < allocated; i++) {
				pfd = &pfds [i];
				if (pfd->fd == data->newpfd.fd) {
					found = TRUE;
					break;
				}
			}

			if (!found) {
				for (i = 1; i < allocated; i++) {
					pfd = &pfds [i];
					if (pfd->fd == -1)
						break;
				}
			}

			if (i == allocated) {
				i = allocated;
				allocated = allocated * 2;
				/* g_renew () reallocates and takes care of the old block itself;
				 * freeing it again here would be a double free. */
				pfds = g_renew (mono_pollfd, pfds, allocated);
				for (; i < allocated; i++)
					INIT_POLLFD (&pfds [i], -1, 0);
				//async_results = g_renew (gpointer, async_results, allocated * 2);
			}
#ifndef HOST_WIN32
			nread = read (data->pipe [0], one, 1);
#else
			nread = recv ((SOCKET) data->pipe [0], one, 1, 0);
#endif
			if (nread <= 0) {
				g_free (pfds);
				mono_ptr_array_destroy (async_results);
				return; /* we're closed */
			}

			INIT_POLLFD (&pfds [i], data->newpfd.fd, data->newpfd.events);
			memset (&data->newpfd, 0, sizeof (mono_pollfd));
			MONO_SEM_POST (&data->new_sem);
			if (i >= maxfd)
				maxfd = i + 1;
			nsock--;
		}

		if (nsock == 0)
			continue;

		EnterCriticalSection (&socket_io_data->io_lock);
		if (socket_io_data->inited == 3) {
			g_free (pfds);
			mono_ptr_array_destroy (async_results);
			LeaveCriticalSection (&socket_io_data->io_lock);
			return; /* cleanup called */
		}

		nresults = 0;
		mono_ptr_array_clear (async_results);

		for (i = 1; i < maxfd && nsock > 0; i++) {
			pfd = &pfds [i];
			if (pfd->fd == -1 || pfd->revents == 0)
				continue;

			nsock--;
			list = mono_g_hash_table_lookup (socket_io_data->sock_to_state, GINT_TO_POINTER (pfd->fd));
			if (list != NULL && (pfd->revents & (MONO_POLLIN | POLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLIN);
				if (ares != NULL) {
					mono_ptr_array_append (async_results, ares);
					++nresults;
				}
			}
			if (list != NULL && (pfd->revents & (MONO_POLLOUT | POLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLOUT);
				if (ares != NULL) {
					mono_ptr_array_append (async_results, ares);
					++nresults;
				}
			}

			if (list != NULL) {
				mono_g_hash_table_replace (socket_io_data->sock_to_state, GINT_TO_POINTER (pfd->fd), list);
				pfd->events = get_events_from_list (list);
			} else {
				mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (pfd->fd));
				pfd->fd = -1;
				if (i == maxfd - 1)
					maxfd--;
			}
		}
		LeaveCriticalSection (&socket_io_data->io_lock);
		threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results.data, nresults);
		mono_ptr_array_clear (async_results);
	}
}