/*
 * mono_w32socket_accept:
 * Accept a connection on socket @s, filling @addr/@addrlen with the peer
 * address. When @blocking is TRUE the call may suspend; the wrapper macro
 * makes the wait alertable so thread interruption can break it.
 * Returns the new socket, or INVALID_SOCKET on failure.
 */
SOCKET
mono_w32socket_accept (SOCKET s, struct sockaddr *addr, socklen_t *addrlen, gboolean blocking)
{
	/* NOTE(review): curthread looks unused here but is presumably
	 * referenced inside ALERTABLE_SOCKET_CALL — confirm before removing. */
	MonoInternalThread *curthread = mono_thread_internal_current ();
	SOCKET newsock = INVALID_SOCKET;
	ALERTABLE_SOCKET_CALL (FD_ACCEPT_BIT, blocking, TRUE, newsock, accept, s, addr, addrlen);
	return newsock;
}
/*
 * ves_icall_System_GC_WaitForPendingFinalizers:
 * Icall backing GC.WaitForPendingFinalizers(). Blocks the caller until the
 * finalizer thread signals pending_done_event, with early-outs that prevent
 * self-deadlock.
 */
void
ves_icall_System_GC_WaitForPendingFinalizers (void)
{
#ifndef HAVE_NULL_GC
	/* Nothing queued: nothing to wait for. */
	if (!mono_gc_pending_finalizers ())
		return;

	if (mono_thread_internal_current () == gc_thread)
		/* Avoid deadlocks */
		return;

	/* If the finalizer thread is not live, lets pretend no finalizers are
	 * pending since the current thread might be the one responsible for
	 * starting it up. */
	if (gc_thread == NULL)
		return;

	/* Reset the completion event, kick the finalizer thread, then wait
	 * alertably (TRUE) so APCs/interruption can wake us. */
	ResetEvent (pending_done_event);
	mono_gc_finalize_notify ();
	/* g_print ("Waiting for pending finalizers....\n"); */
	WaitForSingleObjectEx (pending_done_event, INFINITE, TRUE);
	/* g_print ("Done pending....\n"); */
#endif
}
/*
 * mono_w32socket_recv:
 * Receive up to @len bytes from socket @s into @buf with recv() @flags.
 * When @blocking is TRUE the wait is made alertable via the wrapper macro
 * so thread interruption can break it.
 * Returns the byte count, or SOCKET_ERROR on failure.
 */
int
mono_w32socket_recv (SOCKET s, char *buf, int len, int flags, gboolean blocking)
{
	/* NOTE(review): curthread is presumably consumed by
	 * ALERTABLE_SOCKET_CALL — confirm before removing. */
	MonoInternalThread *curthread = mono_thread_internal_current ();
	int ret = SOCKET_ERROR;
	ALERTABLE_SOCKET_CALL (FD_READ_BIT, blocking, TRUE, ret, recv, s, buf, len, flags);
	return ret;
}
/*
 * mutex_handle_own:
 * Take (or recursively re-take) ownership of the mutex behind @handle_data
 * for the calling thread. Sets *@abandoned to TRUE if the mutex had been
 * abandoned by a dying owner. Caller is expected to hold the handle lock.
 * Always returns TRUE.
 */
static gboolean
mutex_handle_own (MonoW32Handle *handle_data, gboolean *abandoned)
{
	MonoW32HandleMutex *mutex_handle;

	*abandoned = FALSE;

	mutex_handle = (MonoW32HandleMutex*) handle_data->specific;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: owning %s handle %p, before: [tid: %p, recursion: %d], after: [tid: %p, recursion: %d], abandoned: %s", __func__, mono_w32handle_get_typename (handle_data->type), handle_data, (gpointer) mutex_handle->tid, mutex_handle->recursion, (gpointer) pthread_self (), mutex_handle->recursion + 1, mutex_handle->abandoned ? "true" : "false");

	if (mutex_handle->recursion != 0) {
		/* Recursive acquisition is only legal from the owning thread. */
		g_assert (pthread_equal (pthread_self (), mutex_handle->tid));
		mutex_handle->recursion++;
	} else {
		/* First acquisition: record ownership on the internal thread so
		 * the mutex can be abandoned if the thread dies holding it.
		 * NOTE(review): handle_data is passed twice — presumably the
		 * callee takes both the opaque handle and the typed struct;
		 * confirm against thread_own_mutex's signature. */
		mutex_handle->tid = pthread_self ();
		mutex_handle->recursion = 1;

		thread_own_mutex (mono_thread_internal_current (), handle_data, handle_data);
	}

	if (mutex_handle->abandoned) {
		mutex_handle->abandoned = FALSE;
		*abandoned = TRUE;
	}

	/* An owned mutex is unsignalled until released. */
	mono_w32handle_set_signal_state (handle_data, FALSE, FALSE);

	return TRUE;
}
/*
 * mutex_handle_signal:
 * Release one level of ownership of the mutex behind @handle_data from the
 * calling thread. Abandoned mutexes and mutexes owned by other threads are
 * only logged; when the recursion count of an owned mutex drops to zero the
 * handle is disowned and its signal state set.
 *
 * Fix: the unlock trace format string read "recusion : %d"; corrected to
 * "recursion: %d" to match the other trace messages.
 */
static void
mutex_handle_signal (MonoW32Handle *handle_data)
{
	MonoW32HandleMutex *mutex_handle;
	pthread_t tid;

	mutex_handle = (MonoW32HandleMutex*) handle_data->specific;

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: signalling %s handle %p, tid: %p recursion: %d", __func__, mono_w32handle_get_typename (handle_data->type), handle_data, (gpointer) mutex_handle->tid, mutex_handle->recursion);

	tid = pthread_self ();

	if (mutex_handle->abandoned) {
		/* Nothing to do: an abandoned mutex has no live owner. */
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: %s handle %p is abandoned", __func__, mono_w32handle_get_typename (handle_data->type), handle_data);
	} else if (!pthread_equal (mutex_handle->tid, tid)) {
		/* Only the owning thread may release. */
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: we don't own %s handle %p (owned by %ld, me %ld)", __func__, mono_w32handle_get_typename (handle_data->type), handle_data, (long)mutex_handle->tid, (long)tid);
	} else {
		/* OK, we own this mutex */
		mutex_handle->recursion--;

		if (mutex_handle->recursion == 0) {
			thread_disown_mutex (mono_thread_internal_current (), handle_data);

			mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: unlocking %s handle %p, tid: %p recursion: %d", __func__, mono_w32handle_get_typename (handle_data->type), handle_data, (gpointer) mutex_handle->tid, mutex_handle->recursion);

			mutex_handle->tid = 0;
			mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
		}
	}
}
/*
 * ves_icall_System_Threading_Mutex_ReleaseMutex_internal:
 * Icall backing Mutex.ReleaseMutex(). Validates @handle, then releases one
 * level of recursion if the calling thread owns the mutex. Returns TRUE on
 * success (including the abandoned case, matching Win32 ReleaseMutex()),
 * FALSE and sets the last error on an invalid handle or non-owner release.
 *
 * Fixes: diagnostic string typos — "unkown handle" -> "unknown handle" and
 * "recusion : %d" -> "recursion: %d".
 */
MonoBoolean
ves_icall_System_Threading_Mutex_ReleaseMutex_internal (gpointer handle)
{
	MonoW32Handle *handle_data;
	MonoW32HandleMutex *mutex_handle;
	pthread_t tid;
	gboolean ret;

	if (!mono_w32handle_lookup_and_ref (handle, &handle_data)) {
		g_warning ("%s: unknown handle %p", __func__, handle);
		mono_w32error_set_last (ERROR_INVALID_HANDLE);
		return FALSE;
	}

	if (handle_data->type != MONO_W32TYPE_MUTEX && handle_data->type != MONO_W32TYPE_NAMEDMUTEX) {
		g_warning ("%s: unknown mutex handle %p", __func__, handle);
		mono_w32error_set_last (ERROR_INVALID_HANDLE);
		mono_w32handle_unref (handle_data);
		return FALSE;
	}

	mutex_handle = (MonoW32HandleMutex*) handle_data->specific;

	/* NOTE(review): this trace reads tid/recursion before the handle lock
	 * is taken, so the logged values may be slightly stale. */
	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: releasing %s handle %p, tid: %p recursion: %d", __func__, mono_w32handle_get_typename (handle_data->type), handle, (gpointer) mutex_handle->tid, mutex_handle->recursion);

	mono_w32handle_lock (handle_data);

	tid = pthread_self ();

	if (mutex_handle->abandoned) {
		// The Win32 ReleaseMutex() function returns TRUE for abandoned mutexes
		ret = TRUE;
	} else if (!pthread_equal (mutex_handle->tid, tid)) {
		ret = FALSE;

		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: we don't own %s handle %p (owned by %ld, me %ld)", __func__, mono_w32handle_get_typename (handle_data->type), handle, (long)mutex_handle->tid, (long)tid);
	} else {
		ret = TRUE;

		/* OK, we own this mutex */
		mutex_handle->recursion--;

		if (mutex_handle->recursion == 0) {
			thread_disown_mutex (mono_thread_internal_current (), handle);

			mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_MUTEX, "%s: unlocking %s handle %p, tid: %p recursion: %d", __func__, mono_w32handle_get_typename (handle_data->type), handle, (gpointer) mutex_handle->tid, mutex_handle->recursion);

			mutex_handle->tid = 0;
			mono_w32handle_set_signal_state (handle_data, TRUE, FALSE);
		}
	}

	mono_w32handle_unlock (handle_data);
	mono_w32handle_unref (handle_data);

	return ret;
}
/*
 * is_ip_in_managed_allocator:
 * Decide whether instruction pointer @ip inside @domain falls within a
 * GC-critical managed method (e.g. a managed allocator).
 */
static gboolean
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip)
{
	MonoJitInfo *jinfo;

	/* No internal thread yet: happens during thread attach. */
	if (mono_thread_internal_current () == NULL)
		return FALSE;

	if (ip == NULL || domain == NULL)
		return FALSE;

	if (!sgen_has_critical_method ())
		return FALSE;

	/*
	 * mono_jit_info_table_find is not async safe since it calls into the AOT runtime to load information for
	 * missing methods (#13951). To work around this, we disable the AOT fallback. For this to work, the JIT needs
	 * to register the jit info for all GC critical methods after they are JITted/loaded.
	 */
	jinfo = mono_jit_info_table_find_internal (domain, (char *)ip, FALSE, FALSE);

	return jinfo != NULL && sgen_is_critical_method (mono_jit_info_get_method (jinfo));
}
/*
 * mutex_handle_own:
 * Take (or recursively re-take) ownership of the mutex behind @handle of
 * w32handle @type for the calling thread. Sets *@abandoned to TRUE if the
 * previous owner abandoned the mutex. Returns FALSE only if the handle
 * lookup fails, TRUE otherwise.
 */
static gboolean
mutex_handle_own (gpointer handle, MonoW32HandleType type, gboolean *abandoned)
{
	MonoW32HandleMutex *mutex_handle;

	*abandoned = FALSE;

	if (!mono_w32handle_lookup (handle, type, (gpointer *)&mutex_handle)) {
		g_warning ("%s: error looking up %s handle %p", __func__, mono_w32handle_get_typename (type), handle);
		return FALSE;
	}

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: owning %s handle %p, before: [tid: %p, recursion: %d], after: [tid: %p, recursion: %d], abandoned: %s", __func__, mono_w32handle_get_typename (type), handle, (gpointer) mutex_handle->tid, mutex_handle->recursion, (gpointer) pthread_self (), mutex_handle->recursion + 1, mutex_handle->abandoned ? "true" : "false");

	if (mutex_handle->recursion != 0) {
		/* Recursive acquisition is only legal from the owning thread. */
		g_assert (pthread_equal (pthread_self (), mutex_handle->tid));
		mutex_handle->recursion++;
	} else {
		/* First acquisition: record ownership on the internal thread so
		 * the mutex can be abandoned if the thread dies holding it. */
		mutex_handle->tid = pthread_self ();
		mutex_handle->recursion = 1;

		thread_own_mutex (mono_thread_internal_current (), handle);
	}

	if (mutex_handle->abandoned) {
		mutex_handle->abandoned = FALSE;
		*abandoned = TRUE;
	}

	/* An owned mutex is unsignalled until released. */
	mono_w32handle_set_signal_state (handle, FALSE, FALSE);

	return TRUE;
}
/*
 * clear_thread_state:
 * Reset the current thread's state after running user code, keeping only
 * the Background flag set.
 */
static void
clear_thread_state (void)
{
	MonoInternalThread *self = mono_thread_internal_current ();

	/* If the callee changes the background status, set it back to TRUE */
	mono_thread_clr_state (self, ~ThreadState_Background);
	if (!mono_thread_test_state (self, ThreadState_Background))
		ves_icall_System_Threading_Thread_SetState (self, ThreadState_Background);
}
/*
 * monitor_thread:
 * Threadpool monitor loop: roughly every 500ms (tolerating a few spurious
 * wakeups) it checks both pools and starts a worker thread for any pool
 * that has queued work but no waiting workers.
 *
 * Fix: the work-stealing-queue scan reused the outer pool loop counter 'i',
 * clobbering it and potentially skipping the second pool; it now uses its
 * own index 'j'.
 */
static void
monitor_thread (gpointer unused)
{
	ThreadPool *pools [2];
	MonoInternalThread *thread;
	guint32 ms;
	gboolean need_one;
	int i;

	pools [0] = &async_tp;
	pools [1] = &async_io_tp;
	thread = mono_thread_internal_current ();
	ves_icall_System_Threading_Thread_SetName_internal (thread, mono_string_new (mono_domain_get (), "Threadpool monitor"));

	while (1) {
		ms = 500;
		i = 10; //number of spurious awakes we tolerate before doing a round of rebalancing.
		do {
			guint32 ts;
			ts = mono_msec_ticks ();
			if (SleepEx (ms, TRUE) == 0)
				break;
			ms -= (mono_msec_ticks () - ts);
			if (mono_runtime_is_shutting_down ())
				break;
			if (THREAD_WANTS_A_BREAK (thread))
				mono_thread_interruption_checkpoint ();
		} while (ms > 0 && i--);

		if (mono_runtime_is_shutting_down ())
			break;

		if (suspended)
			continue;

		for (i = 0; i < 2; i++) {
			ThreadPool *tp;
			tp = pools [i];

			/* Someone is already waiting for work; no new thread needed. */
			if (tp->waiting > 0)
				continue;

			need_one = (mono_cq_count (tp->queue) > 0);
			if (!need_one && !tp->is_io) {
				int j;

				/* Check the per-thread work-stealing queues too,
				 * using a separate index so the pools counter 'i'
				 * is not clobbered. */
				EnterCriticalSection (&wsqs_lock);
				for (j = 0; wsqs != NULL && j < wsqs->len; j++) {
					MonoWSQ *wsq;
					wsq = g_ptr_array_index (wsqs, j);
					if (mono_wsq_count (wsq) != 0) {
						need_one = TRUE;
						break;
					}
				}
				LeaveCriticalSection (&wsqs_lock);
			}

			if (need_one)
				threadpool_start_thread (tp);
		}
	}
}
/*
 * set_tp_thread_info:
 * Notify the profiler about this worker thread and give it a descriptive
 * name depending on whether @tp is the I/O pool.
 */
static void
set_tp_thread_info (ThreadPool *tp)
{
	MonoInternalThread *self = mono_thread_internal_current ();
	const gchar *worker_name = tp->is_io ? "IO Threadpool worker" : "Threadpool worker";

	mono_profiler_thread_start (self->tid);
	mono_thread_set_name_internal (self, mono_string_new (mono_domain_get (), worker_name), FALSE);
}
/*
 * sigabrt_signal_handler:
 * SIGABRT handler. If the fault did not occur in JITted code (no jit info
 * for the faulting IP), first give chained handlers a chance; otherwise
 * report it as a native crash.
 */
SIG_HANDLER_FUNC (static, sigabrt_signal_handler)
{
	MonoJitInfo *ji = NULL;
	GET_CONTEXT;

	/* Only consult the JIT info table if the runtime thread exists;
	 * during early attach there is nothing to look up. */
	if (mono_thread_internal_current ())
		ji = mono_jit_info_table_find (mono_domain_get (), mono_arch_ip_from_context(ctx));
	if (!ji) {
		if (mono_chain_signal (SIG_HANDLER_PARAMS))
			return;
		mono_handle_native_sigsegv (SIGABRT, ctx);
	}
}
/*
 * check_for_interruption_critical:
 * Run a thread-interruption checkpoint from a GC skip-thread region.
 * The skip-thread flag must be dropped before touching managed state and
 * restored afterwards — the order of these calls is load-bearing.
 */
void
check_for_interruption_critical (void)
{
	MonoInternalThread *self;

	/*RULE NUMBER ONE OF SKIP_THREAD: NEVER POKE MANAGED STATE.*/
	mono_gc_set_skip_thread (FALSE);

	self = mono_thread_internal_current ();
	if (THREAD_WANTS_A_BREAK (self))
		mono_thread_interruption_checkpoint ();

	/*RULE NUMBER TWO OF SKIP_THREAD: READ RULE NUMBER ONE.*/
	mono_gc_set_skip_thread (TRUE);
}
/*
 * sigabrt_signal_handler:
 * SIGABRT handler (MONO_SIG_HANDLER variant). If the faulting IP is not in
 * JITted code, chained handlers get first refusal; failing that the abort
 * is reported as a native crash.
 */
MONO_SIG_HANDLER_FUNC (static, sigabrt_signal_handler)
{
	MonoJitInfo *ji = NULL;
	MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO ();
	MONO_SIG_HANDLER_GET_CONTEXT;

	/* Only consult the JIT info table if the runtime thread exists. */
	if (mono_thread_internal_current ())
		ji = mono_jit_info_table_find_internal (mono_domain_get (), mono_arch_ip_from_context (ctx), TRUE, TRUE);
	if (!ji) {
		if (mono_chain_signal (MONO_SIG_HANDLER_PARAMS))
			return;
		mono_handle_native_crash ("SIGABRT", ctx, info);
	}
}
void ves_icall_System_GC_WaitForPendingFinalizers (void) { #ifndef HAVE_NULL_GC if (!mono_gc_pending_finalizers ()) return; if (mono_thread_internal_current () == gc_thread) /* Avoid deadlocks */ return; ResetEvent (pending_done_event); mono_gc_finalize_notify (); /* g_print ("Waiting for pending finalizers....\n"); */ WaitForSingleObjectEx (pending_done_event, INFINITE, TRUE); /* g_print ("Done pending....\n"); */ #endif }
/*
 * mono_internal_thread_unhandled_exception:
 * Handle an exception that escaped a runtime thread when the unhandled
 * exception policy is MONO_UNHANDLED_POLICY_CURRENT. ThreadAbort and
 * appdomain-unload exceptions are not reported; a ThreadAbort additionally
 * resets the abort state of the current thread.
 */
void
mono_internal_thread_unhandled_exception (MonoObject* exc)
{
	MonoClass *klass;
	gboolean unloaded;

	if (mono_runtime_unhandled_exception_policy_get () != MONO_UNHANDLED_POLICY_CURRENT)
		return;

	klass = exc->vtable->klass;
	unloaded = is_appdomainunloaded_exception (exc->vtable->domain, klass);
	if (!unloaded && klass != mono_defaults.threadabortexception_class) {
		mono_unhandled_exception (exc);
		if (mono_environment_exitcode_get () == 1)
			exit (255);
	}
	if (klass == mono_defaults.threadabortexception_class)
		mono_thread_internal_reset_abort (mono_thread_internal_current ());
}
/*
 * epoll_event_wait:
 * Block in epoll_wait() (GC-safe, skip-thread marked) until events arrive,
 * then invoke @callback for each ready fd with an EVENT_IN/EVENT_OUT mask.
 * EINTR triggers an interruption check and is treated as zero events; any
 * other error aborts via g_error(). Returns 0 on success, -1 on error
 * (unreachable in practice since g_error() does not return).
 */
static gint
epoll_event_wait (void (*callback) (gint fd, gint events, gpointer user_data), gpointer user_data)
{
	gint i, ready;

	memset (epoll_events, 0, sizeof (struct epoll_event) * EPOLL_NEVENTS);

	/* The wait can block indefinitely, so let the GC skip/scan around us. */
	mono_gc_set_skip_thread (TRUE);
	MONO_ENTER_GC_SAFE;
	ready = epoll_wait (epoll_fd, epoll_events, EPOLL_NEVENTS, -1);
	MONO_EXIT_GC_SAFE;
	mono_gc_set_skip_thread (FALSE);

	if (ready == -1) {
		switch (errno) {
		case EINTR:
			mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
			ready = 0;
			break;
		default:
			g_error ("epoll_event_wait: epoll_wait () failed, error (%d) %s", errno, g_strerror (errno));
			break;
		}
	}

	if (ready == -1)
		return -1;

	for (i = 0; i < ready; ++i) {
		gint fd, events = 0;

		fd = epoll_events [i].data.fd;
		/* Errors/hangups are reported as both readable and writable so
		 * pending operations in either direction get woken. */
		if (epoll_events [i].events & (EPOLLIN | EPOLLERR | EPOLLHUP))
			events |= EVENT_IN;
		if (epoll_events [i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP))
			events |= EVENT_OUT;
		callback (fd, events, user_data);
	}

	return 0;
}
/*
 * kqueue_event_wait:
 * Block in kevent() until events arrive, then invoke @callback for each
 * ready fd with an EVENT_IN/EVENT_OUT mask. EINTR triggers an interruption
 * check and is treated as zero events; any other error aborts via g_error().
 *
 * Fix: the blocking kevent() call is now bracketed with
 * MONO_ENTER_GC_SAFE/MONO_EXIT_GC_SAFE, matching the epoll backend — the
 * thread can otherwise stall GC suspend while parked in the kernel.
 */
static gint
kqueue_event_wait (void (*callback) (gint fd, gint events, gpointer user_data), gpointer user_data)
{
	gint i, ready;

	memset (kqueue_events, 0, sizeof (struct kevent) * KQUEUE_NEVENTS);

	mono_gc_set_skip_thread (TRUE);
	MONO_ENTER_GC_SAFE;
	ready = kevent (kqueue_fd, NULL, 0, kqueue_events, KQUEUE_NEVENTS, NULL);
	MONO_EXIT_GC_SAFE;
	mono_gc_set_skip_thread (FALSE);

	if (ready == -1) {
		switch (errno) {
		case EINTR:
			mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
			ready = 0;
			break;
		default:
			g_error ("kqueue_event_wait: kevent () failed, error (%d) %s", errno, g_strerror (errno));
			break;
		}
	}

	if (ready == -1)
		return -1;

	for (i = 0; i < ready; ++i) {
		gint fd, events = 0;

		fd = kqueue_events [i].ident;
		/* EV_ERROR is reported as both readable and writable so pending
		 * operations in either direction get woken. */
		if (kqueue_events [i].filter == EVFILT_READ || (kqueue_events [i].flags & EV_ERROR) != 0)
			events |= EVENT_IN;
		if (kqueue_events [i].filter == EVFILT_WRITE || (kqueue_events [i].flags & EV_ERROR) != 0)
			events |= EVENT_OUT;
		callback (fd, events, user_data);
	}

	return 0;
}
static gint kqueue_event_wait (void) { gint ready; ready = kevent (kqueue_fd, NULL, 0, kqueue_events, KQUEUE_NEVENTS, NULL); if (ready == -1) { switch (errno) { case EINTR: mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ()); ready = 0; break; default: g_warning ("kqueue_event_wait: kevent () failed, error (%d) %s", errno, g_strerror (errno)); break; } } return ready; }
static gint epoll_event_wait (void) { gint ready; ready = epoll_wait (epoll_fd, epoll_events, EPOLL_NEVENTS, -1); if (ready == -1) { switch (errno) { case EINTR: mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ()); ready = 0; break; default: g_warning ("epoll_event_wait: epoll_wait () failed, error (%d) %s", errno, g_strerror (errno)); break; } } return ready; }
/*
 * poll_event_wait:
 * Block in mono_poll() (GC-safe, skip-thread marked) until fds are ready,
 * then invoke @callback for each with an EVENT_IN/EVENT_OUT/EVENT_ERR mask.
 * EINTR runs an interruption check; EBADF rescans for bad descriptors;
 * anything else aborts via g_error(). Returns 0 on success, -1 on error.
 */
static gint
poll_event_wait (void (*callback) (gint fd, gint events, gpointer user_data), gpointer user_data)
{
	gint i, ready;

	for (i = 0; i < poll_fds_size; ++i)
		poll_fds [i].revents = 0;

	/* The wait can block indefinitely, so let the GC skip/scan around us. */
	mono_gc_set_skip_thread (TRUE);
	MONO_ENTER_GC_SAFE;
	ready = mono_poll (poll_fds, poll_fds_size, -1);
	MONO_EXIT_GC_SAFE;
	mono_gc_set_skip_thread (FALSE);

	if (ready == -1) {
		/*
		 * Apart from EINTR, we only check EBADF, for the rest:
		 *  EINVAL: mono_poll() 'protects' us from descriptor
		 *      numbers above the limit if using select() by marking
		 *      then as POLLERR. If a system poll() is being
		 *      used, the number of descriptor we're passing will not
		 *      be over sysconf(_SC_OPEN_MAX), as the error would have
		 *      happened when opening.
		 *
		 *  EFAULT: we own the memory pointed by pfds.
		 *  ENOMEM: we're doomed anyway
		 *
		 */
#if !defined(HOST_WIN32)
		switch (errno)
#else
		switch (WSAGetLastError ())
#endif
		{
#if !defined(HOST_WIN32)
		case EINTR:
#else
		case WSAEINTR:
#endif
		{
			mono_thread_internal_check_for_interruption_critical (mono_thread_internal_current ());
			ready = 0;
			break;
		}
#if !defined(HOST_WIN32)
		case EBADF:
#else
		case WSAEBADF:
#endif
		{
			/* A descriptor went stale: mark the offenders instead of dying. */
			ready = poll_mark_bad_fds (poll_fds, poll_fds_size);
			break;
		}
		default:
#if !defined(HOST_WIN32)
			g_error ("poll_event_wait: mono_poll () failed, error (%d) %s", errno, g_strerror (errno));
#else
			g_error ("poll_event_wait: mono_poll () failed, error (%d)\n", WSAGetLastError ());
#endif
			break;
		}
	}

	if (ready == -1)
		return -1;
	if (ready == 0)
		return 0;

	g_assert (ready > 0);

	for (i = 0; i < poll_fds_size; ++i) {
		gint fd, events = 0;

		if (poll_fds [i].fd == -1)
			continue;
		if (poll_fds [i].revents == 0)
			continue;

		fd = poll_fds [i].fd;
		/* Error conditions wake both directions; EVENT_ERR is flagged separately. */
		if (poll_fds [i].revents & (MONO_POLLIN | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL))
			events |= EVENT_IN;
		if (poll_fds [i].revents & (MONO_POLLOUT | MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL))
			events |= EVENT_OUT;
		if (poll_fds [i].revents & (MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL))
			events |= EVENT_ERR;
		callback (fd, events, user_data);

		/* Stop scanning once every ready fd has been dispatched. */
		if (--ready == 0)
			break;
	}

	return 0;
}
/*
 * sigusr1_signal_handler:
 * Async signal handler used to interrupt a thread: services thread-dump
 * requests, lets the debugger agent intercept, honours handler-block guards,
 * and finally raises the interruption exception into the interrupted context.
 * Runs in async-signal context — see the safety notes inline.
 */
static void
SIG_HANDLER_SIGNATURE (sigusr1_signal_handler)
{
	gboolean running_managed;
	MonoException *exc;
	MonoInternalThread *thread = mono_thread_internal_current ();
	MonoDomain *domain = mono_domain_get ();
	void *ji;

	GET_CONTEXT;

	if (!thread || !domain)
		/* The thread might not have started up yet */
		/* FIXME: Specify the synchronization with start_wrapper () in threads.c */
		return;

	if (thread->thread_dump_requested) {
		thread->thread_dump_requested = FALSE;

		mono_print_thread_dump (ctx);
	}

	/*
	 * This is an async signal, so the code below must not call anything which
	 * is not async safe. That includes the pthread locking functions. If we
	 * know that we interrupted managed code, then locking is safe.
	 */
	/*
	 * On OpenBSD, ctx can be NULL if we are interrupting poll ().
	 */
	if (ctx) {
		ji = mono_jit_info_table_find (mono_domain_get (), mono_arch_ip_from_context(ctx));
		running_managed = ji != NULL;

		if (mono_debugger_agent_thread_interrupt (ctx, ji))
			return;
	} else {
		running_managed = FALSE;
	}

	/* We can't do handler block checking from metadata since it requires doing
	 * a stack walk with context.
	 *
	 * FIXME add full-aot support.
	 */
#ifdef MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX
	if (!mono_aot_only && ctx) {
		MonoThreadUnwindState unwind_state;
		if (mono_thread_state_init_from_sigctx (&unwind_state, ctx)) {
			if (mono_install_handler_block_guard (&unwind_state)) {
#ifndef HOST_WIN32
				/*Clear current thread from been wapi interrupted otherwise things can go south*/
				wapi_clear_interruption ();
#endif
				return;
			}
		}
	}
#endif

	exc = mono_thread_request_interruption (running_managed);
	if (!exc)
		return;

	mono_arch_handle_exception (ctx, exc, FALSE);
}
/*
 * tp_poll_wait:
 * poll()-based threadpool I/O loop. Slot 0 of the pollfd table is a control
 * pipe: a byte written there registers the fd described by data->newpfd.
 * For every other ready fd, the pending async results are collected and
 * appended to the I/O threadpool as jobs.
 *
 * Fix: the table-growth path called g_free (oldfd) AFTER g_renew () — but
 * g_renew () is a realloc and already disposes of the old block, so that
 * free was a double free / use-after-free of the live table. The stray
 * free is removed.
 */
static void
tp_poll_wait (gpointer p)
{
#if MONO_SMALL_CONFIG
#define INITIAL_POLLFD_SIZE	128
#else
#define INITIAL_POLLFD_SIZE	1024
#endif
#define POLL_ERRORS (MONO_POLLERR | MONO_POLLHUP | MONO_POLLNVAL)
	mono_pollfd *pfds;
	gint maxfd = 1;
	gint allocated;
	gint i;
	MonoInternalThread *thread;
	tp_poll_data *data;
	SocketIOData *socket_io_data = p;
	gpointer *async_results;
	gint nresults;

	thread = mono_thread_internal_current ();
	data = socket_io_data->event_data;
	allocated = INITIAL_POLLFD_SIZE;
	pfds = g_new0 (mono_pollfd, allocated);
	/* Up to two results (read + write) per fd. */
	async_results = g_new0 (gpointer, allocated * 2);
	INIT_POLLFD (pfds, data->pipe [0], MONO_POLLIN);
	for (i = 1; i < allocated; i++)
		INIT_POLLFD (&pfds [i], -1, 0);

	while (1) {
		int nsock = 0;
		mono_pollfd *pfd;
		char one [1];
		MonoMList *list;
		MonoObject *ares;

		do {
			if (nsock == -1) {
				if (THREAD_WANTS_A_BREAK (thread))
					mono_thread_interruption_checkpoint ();
			}
			nsock = mono_poll (pfds, maxfd, -1);
		} while (nsock == -1 && errno == EINTR);

		/*
		 * Apart from EINTR, we only check EBADF, for the rest:
		 * EINVAL: mono_poll() 'protects' us from descriptor
		 * numbers above the limit if using select() by marking
		 * then as MONO_POLLERR. If a system poll() is being
		 * used, the number of descriptor we're passing will not
		 * be over sysconf(_SC_OPEN_MAX), as the error would have
		 * happened when opening.
		 *
		 * EFAULT: we own the memory pointed by pfds.
		 * ENOMEM: we're doomed anyway
		 *
		 */
		if (nsock == -1 && errno == EBADF) {
			pfds->revents = 0; /* Just in case... */
			nsock = mark_bad_fds (pfds, maxfd);
		}

		if ((pfds->revents & POLL_ERRORS) != 0) {
			/* We're supposed to die now, as the pipe has been closed */
			g_free (pfds);
			g_free (async_results);
			socket_io_cleanup (socket_io_data);
			return;
		}

		/* Got a new socket */
		if ((pfds->revents & MONO_POLLIN) != 0) {
			int nread;

			/* Find a free slot (or the slot already holding this fd). */
			for (i = 1; i < allocated; i++) {
				pfd = &pfds [i];
				if (pfd->fd == -1 || pfd->fd == data->newpfd.fd)
					break;
			}

			if (i == allocated) {
				/* Grow both tables. g_renew () reallocates (and
				 * disposes of) the old block itself — it must NOT
				 * be freed separately. */
				i = allocated;
				allocated = allocated * 2;
				pfds = g_renew (mono_pollfd, pfds, allocated);
				for (; i < allocated; i++)
					INIT_POLLFD (&pfds [i], -1, 0);
				async_results = g_renew (gpointer, async_results, allocated * 2);
			}
#ifndef HOST_WIN32
			nread = read (data->pipe [0], one, 1);
#else
			nread = recv ((SOCKET) data->pipe [0], one, 1, 0);
#endif
			if (nread <= 0) {
				g_free (pfds);
				g_free (async_results);
				return; /* we're closed */
			}

			INIT_POLLFD (&pfds [i], data->newpfd.fd, data->newpfd.events);
			memset (&data->newpfd, 0, sizeof (mono_pollfd));
			/* Let the registering thread continue. */
			MONO_SEM_POST (&data->new_sem);
			if (i >= maxfd)
				maxfd = i + 1;
			nsock--;
		}

		if (nsock == 0)
			continue;

		EnterCriticalSection (&socket_io_data->io_lock);
		if (socket_io_data->inited == 3) {
			g_free (pfds);
			g_free (async_results);
			LeaveCriticalSection (&socket_io_data->io_lock);
			return; /* cleanup called */
		}

		nresults = 0;
		for (i = 1; i < maxfd && nsock > 0; i++) {
			pfd = &pfds [i];
			if (pfd->fd == -1 || pfd->revents == 0)
				continue;
			nsock--;
			list = mono_g_hash_table_lookup (socket_io_data->sock_to_state, GINT_TO_POINTER (pfd->fd));
			if (list != NULL && (pfd->revents & (MONO_POLLIN | POLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLIN);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}
			if (list != NULL && (pfd->revents & (MONO_POLLOUT | POLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLOUT);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}

			if (list != NULL) {
				/* More waiters remain: keep polling for whatever they need. */
				mono_g_hash_table_replace (socket_io_data->sock_to_state, GINT_TO_POINTER (pfd->fd), list);
				pfd->events = get_events_from_list (list);
			} else {
				/* No waiters left: retire the slot. */
				mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (pfd->fd));
				pfd->fd = -1;
				if (i == maxfd - 1)
					maxfd--;
			}
		}
		LeaveCriticalSection (&socket_io_data->io_lock);
		threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
		memset (async_results, 0, sizeof (gpointer) * nresults);
	}
}
/*
 * mono_domain_finalize:
 * Request finalization of every object in @domain and wait up to @timeout
 * milliseconds (-1 == INFINITE) for the finalizer thread to complete it.
 * Returns TRUE on completion, FALSE on timeout, stop/suspend request, or
 * when called from the finalizer thread itself.
 */
gboolean
mono_domain_finalize (MonoDomain *domain, guint32 timeout)
{
	DomainFinalizationReq *req;
	guint32 res;
	HANDLE done_event;
	MonoInternalThread *thread = mono_thread_internal_current ();

	if (mono_thread_internal_current () == gc_thread)
		/* We are called from inside a finalizer, not much we can do here */
		return FALSE;

	/*
	 * No need to create another thread 'cause the finalizer thread
	 * is still working and will take care of running the finalizers
	 */

#ifndef HAVE_NULL_GC
	if (gc_disabled)
		return TRUE;

	mono_gc_collect (mono_gc_max_generation ());

	done_event = CreateEvent (NULL, TRUE, FALSE, NULL);
	if (done_event == NULL) {
		return FALSE;
	}

	/* The request is handed off to the finalizer thread via
	 * domains_to_finalize; presumably that thread frees it — confirm,
	 * since the early-return paths below do not. */
	req = g_new0 (DomainFinalizationReq, 1);
	req->domain = domain;
	req->done_event = done_event;

	if (domain == mono_get_root_domain ())
		finalizing_root_domain = TRUE;

	mono_finalizer_lock ();

	domains_to_finalize = g_slist_append (domains_to_finalize, req);

	mono_finalizer_unlock ();

	/* Tell the finalizer thread to finalize this appdomain */
	mono_gc_finalize_notify ();

	if (timeout == -1)
		timeout = INFINITE;

	while (TRUE) {
		/* Alertable wait so APCs/interruption can wake us. */
		res = WaitForSingleObjectEx (done_event, timeout, TRUE);
		/* printf ("WAIT RES: %d.\n", res); */
		if (res == WAIT_IO_COMPLETION) {
			/* Bail out if this thread is being stopped/suspended;
			 * NOTE(review): done_event is not closed on this path. */
			if ((thread->state & (ThreadState_StopRequested | ThreadState_SuspendRequested)) != 0)
				return FALSE;
		} else if (res == WAIT_TIMEOUT) {
			/* We leak the handle here */
			return FALSE;
		} else {
			break;
		}
	}

	CloseHandle (done_event);

	if (domain == mono_get_root_domain ()) {
		mono_thread_pool_cleanup ();
		mono_gc_finalize_threadpool_threads ();
	}

	return TRUE;
#else
	/* We don't support domain finalization without a GC */
	return FALSE;
#endif
}
/*
 * ensure_cleanedup:
 * Tear down the threadpool exactly once, racing gracefully with
 * initialization and with other cleaners via the interlocked 'status'
 * state machine. The winning thread unparks all workers, waits for them
 * and the monitor to drain, then frees every threadpool resource.
 */
static void
ensure_cleanedup (void)
{
	/* Never initialized: jump straight to CLEANED_UP. */
	if (status == STATUS_NOT_INITIALIZED && InterlockedCompareExchange (&status, STATUS_CLEANED_UP, STATUS_NOT_INITIALIZED) == STATUS_NOT_INITIALIZED)
		return;
	/* Someone is still initializing: wait for them to finish first. */
	if (status == STATUS_INITIALIZING) {
		while (status == STATUS_INITIALIZING)
			mono_thread_info_yield ();
	}
	if (status == STATUS_CLEANED_UP)
		return;
	/* Another thread won the INITIALIZED -> CLEANING_UP transition:
	 * just wait until it is done. */
	if (status == STATUS_CLEANING_UP || InterlockedCompareExchange (&status, STATUS_CLEANING_UP, STATUS_INITIALIZED) != STATUS_INITIALIZED) {
		while (status == STATUS_CLEANING_UP)
			mono_thread_info_yield ();
		g_assert (status == STATUS_CLEANED_UP);
		return;
	}

	/* we make the assumption along the code that we are
	 * cleaning up only if the runtime is shutting down */
	g_assert (mono_runtime_is_shutting_down ());

	/* Unpark all worker threads */
	mono_mutex_lock (&threadpool->parked_threads_lock);
	for (;;) {
		guint i;
		ThreadPoolCounter counter = COUNTER_READ ();
		if (counter._.active == 0 && counter._.parked == 0)
			break;
		if (counter._.active == 1) {
			MonoInternalThread *thread = mono_thread_internal_current ();
			if (thread->threadpool_thread) {
				/* if there is only one active thread
				 * left and it's the current one */
				break;
			}
		}
		for (i = 0; i < threadpool->parked_threads->len; ++i) {
			mono_cond_t *cond = (mono_cond_t*) g_ptr_array_index (threadpool->parked_threads, i);
			mono_cond_signal (cond);
		}
		/* Drop the lock while sleeping so woken threads can
		 * remove themselves from parked_threads. */
		mono_mutex_unlock (&threadpool->parked_threads_lock);
		usleep (1000);
		mono_mutex_lock (&threadpool->parked_threads_lock);
	}
	mono_mutex_unlock (&threadpool->parked_threads_lock);

	/* Wait for the monitor thread to exit its loop. */
	while (monitor_status != MONITOR_STATUS_NOT_RUNNING)
		usleep (1000);

	g_ptr_array_free (threadpool->domains, TRUE);
	mono_mutex_destroy (&threadpool->domains_lock);
	g_ptr_array_free (threadpool->parked_threads, TRUE);
	mono_mutex_destroy (&threadpool->parked_threads_lock);
	g_ptr_array_free (threadpool->working_threads, TRUE);
	mono_mutex_destroy (&threadpool->working_threads_lock);
	mono_mutex_destroy (&threadpool->heuristic_lock);
	g_free (threadpool->heuristic_hill_climbing.samples);
	g_free (threadpool->heuristic_hill_climbing.thread_counts);
	rand_free (threadpool->heuristic_hill_climbing.random_interval_generator);
	g_free (threadpool->cpu_usage_state);
	g_assert (threadpool);
	g_free (threadpool);
	threadpool = NULL;
	g_assert (!threadpool);

	status = STATUS_CLEANED_UP;
}
/*
 * tp_epoll_wait:
 * epoll-based threadpool I/O loop. Waits for socket events, collects the
 * pending async results for each ready fd (up to one read + one write per
 * fd) and appends them to the I/O threadpool as jobs, then re-arms or
 * removes the fd in the epoll set according to the remaining waiters.
 */
static void
tp_epoll_wait (gpointer p)
{
	SocketIOData *socket_io_data;
	int epollfd;
	MonoInternalThread *thread;
	struct epoll_event *events, *evt;
	int ready = 0, i;
	gpointer async_results [EPOLL_NEVENTS * 2]; // * 2 because each loop can add up to 2 results here
	gint nresults;
	tp_epoll_data *data;

	socket_io_data = p;
	data = socket_io_data->event_data;
	epollfd = data->epollfd;
	thread = mono_thread_internal_current ();
	events = g_new0 (struct epoll_event, EPOLL_NEVENTS);

	while (1) {
		/* Blocking wait: let the GC skip this thread meanwhile. */
		mono_gc_set_skip_thread (TRUE);

		do {
			if (ready == -1) {
				if (THREAD_WANTS_A_BREAK (thread))
					mono_thread_interruption_checkpoint ();
			}
			ready = epoll_wait (epollfd, events, EPOLL_NEVENTS, -1);
		} while (ready == -1 && errno == EINTR);

		mono_gc_set_skip_thread (FALSE);

		if (ready == -1) {
			/* EBADF means the epoll fd was closed: exit quietly. */
			int err = errno;
			g_free (events);
			if (err != EBADF)
				g_warning ("epoll_wait: %d %s", err, g_strerror (err));

			return;
		}

		EnterCriticalSection (&socket_io_data->io_lock);
		if (socket_io_data->inited == 3) {
			g_free (events);
			LeaveCriticalSection (&socket_io_data->io_lock);
			return; /* cleanup called */
		}

		nresults = 0;
		for (i = 0; i < ready; i++) {
			int fd;
			MonoMList *list;
			MonoObject *ares;

			evt = &events [i];
			fd = evt->data.fd;
			list = mono_g_hash_table_lookup (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			/* Errors wake both pending read and write waiters. */
			if (list != NULL && (evt->events & (EPOLLIN | EPOLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLIN);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}
			if (list != NULL && (evt->events & (EPOLLOUT | EPOLL_ERRORS)) != 0) {
				ares = get_io_event (&list, MONO_POLLOUT);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}

			if (list != NULL) {
				/* Waiters remain: re-arm the fd for what they need
				 * (MOD first; fall back to ADD if it was dropped). */
				int p;

				mono_g_hash_table_replace (socket_io_data->sock_to_state, GINT_TO_POINTER (fd), list);
				p = get_events_from_list (list);
				evt->events = (p & MONO_POLLOUT) ? EPOLLOUT : 0;
				evt->events |= (p & MONO_POLLIN) ? EPOLLIN : 0;
				if (epoll_ctl (epollfd, EPOLL_CTL_MOD, fd, evt) == -1) {
					if (epoll_ctl (epollfd, EPOLL_CTL_ADD, fd, evt) == -1) {
						int err = errno;
						g_message ("epoll(ADD): %d %s", err, g_strerror (err));
					}
				}
			} else {
				/* No waiters left: forget the fd entirely. */
				mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
				epoll_ctl (epollfd, EPOLL_CTL_DEL, fd, evt);
			}
		}
		LeaveCriticalSection (&socket_io_data->io_lock);
		threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
		mono_gc_bzero (async_results, sizeof (gpointer) * nresults);
	}
}
/*
 * tp_kqueue_wait:
 * kqueue-based threadpool I/O loop. Waits for socket events, collects the
 * pending async results for each ready fd (up to one read + one write per
 * fd) and appends them to the I/O threadpool as jobs, then re-registers
 * one-shot kevents for the remaining waiters.
 */
static void
tp_kqueue_wait (gpointer p)
{
	SocketIOData *socket_io_data;
	int kfd;
	MonoInternalThread *thread;
	struct kevent *events, *evt;
	int ready = 0, i;
	gpointer async_results [KQUEUE_NEVENTS * 2]; // * 2 because each loop can add up to 2 results here
	gint nresults;
	tp_kqueue_data *data;

	socket_io_data = p;
	data = socket_io_data->event_data;
	kfd = data->fd;
	thread = mono_thread_internal_current ();
	events = g_new0 (struct kevent, KQUEUE_NEVENTS);

	while (1) {
		/* Blocking wait: let the GC skip this thread meanwhile. */
		mono_gc_set_skip_thread (TRUE);

		do {
			if (ready == -1) {
				if (THREAD_WANTS_A_BREAK (thread))
					mono_thread_interruption_checkpoint ();
			}
			ready = kevent (kfd, NULL, 0, events, KQUEUE_NEVENTS, NULL);
		} while (ready == -1 && errno == EINTR);

		mono_gc_set_skip_thread (FALSE);

		if (ready == -1) {
			/* EBADF means the kqueue fd was closed: exit quietly. */
			int err = errno;
			g_free (events);
			if (err != EBADF)
				g_warning ("kevent wait: %d %s", err, g_strerror (err));

			return;
		}

		EnterCriticalSection (&socket_io_data->io_lock);
		if (socket_io_data->inited == 3) {
			g_free (events);
			LeaveCriticalSection (&socket_io_data->io_lock);
			return; /* cleanup called */
		}

		nresults = 0;
		for (i = 0; i < ready; i++) {
			int fd;
			MonoMList *list;
			MonoObject *ares;

			evt = &events [i];
			fd = evt->ident;
			list = mono_g_hash_table_lookup (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			/* EV_ERROR wakes both pending read and write waiters. */
			if (list != NULL && (evt->filter == EVFILT_READ || (evt->flags & EV_ERROR) != 0)) {
				ares = get_io_event (&list, MONO_POLLIN);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}
			if (list != NULL && (evt->filter == EVFILT_WRITE || (evt->flags & EV_ERROR) != 0)) {
				ares = get_io_event (&list, MONO_POLLOUT);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}

			if (list != NULL) {
				/* Waiters remain: re-register the matching one-shot
				 * filter so the next event fires again. */
				int p;

				mono_g_hash_table_replace (socket_io_data->sock_to_state, GINT_TO_POINTER (fd), list);
				p = get_events_from_list (list);
				if (evt->filter == EVFILT_READ && (p & MONO_POLLIN) != 0) {
					EV_SET (evt, fd, EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, 0);
					kevent_change (kfd, evt, "READD read");
				}

				if (evt->filter == EVFILT_WRITE && (p & MONO_POLLOUT) != 0) {
					EV_SET (evt, fd, EVFILT_WRITE, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, 0);
					kevent_change (kfd, evt, "READD write");
				}
			} else {
				/* No waiters left: forget the fd entirely. */
				mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			}
		}
		LeaveCriticalSection (&socket_io_data->io_lock);
		threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
		mono_gc_bzero (async_results, sizeof (gpointer) * nresults);
	}
}
/*
 * ves_icall_System_Threading_Mutex_ReleaseMutex_internal:
 * Icall backing Mutex.ReleaseMutex() (w32handle-type variant). Validates
 * @handle, then releases one level of recursion if the calling thread owns
 * the mutex. Returns TRUE on success (including the abandoned case,
 * matching Win32 ReleaseMutex()), FALSE and sets the last error on an
 * invalid handle or non-owner release.
 *
 * Fix: trace string typo "recusion : %d" -> "recursion: %d".
 */
MonoBoolean
ves_icall_System_Threading_Mutex_ReleaseMutex_internal (gpointer handle)
{
	MonoW32HandleType type;
	MonoW32HandleMutex *mutex_handle;
	pthread_t tid;
	gboolean ret;

	if (handle == NULL) {
		mono_w32error_set_last (ERROR_INVALID_HANDLE);
		return FALSE;
	}

	switch (type = mono_w32handle_get_type (handle)) {
	case MONO_W32HANDLE_MUTEX:
	case MONO_W32HANDLE_NAMEDMUTEX:
		break;
	default:
		mono_w32error_set_last (ERROR_INVALID_HANDLE);
		return FALSE;
	}

	if (!mono_w32handle_lookup (handle, type, (gpointer *)&mutex_handle)) {
		g_warning ("%s: error looking up %s handle %p", __func__, mono_w32handle_get_typename (type), handle);
		return FALSE;
	}

	/* NOTE(review): this trace reads tid/recursion before the handle lock
	 * is taken, so the logged values may be slightly stale. */
	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: releasing %s handle %p, tid: %p recursion: %d", __func__, mono_w32handle_get_typename (type), handle, (gpointer) mutex_handle->tid, mutex_handle->recursion);

	mono_w32handle_lock_handle (handle);

	tid = pthread_self ();

	if (mutex_handle->abandoned) {
		// The Win32 ReleaseMutex() function returns TRUE for abandoned mutexes
		ret = TRUE;
	} else if (!pthread_equal (mutex_handle->tid, tid)) {
		ret = FALSE;

		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: we don't own %s handle %p (owned by %ld, me %ld)", __func__, mono_w32handle_get_typename (type), handle, (long)mutex_handle->tid, (long)tid);
	} else {
		ret = TRUE;

		/* OK, we own this mutex */
		mutex_handle->recursion--;

		if (mutex_handle->recursion == 0) {
			thread_disown_mutex (mono_thread_internal_current (), handle);

			mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: unlocking %s handle %p, tid: %p recursion: %d", __func__, mono_w32handle_get_typename (type), handle, (gpointer) mutex_handle->tid, mutex_handle->recursion);

			mutex_handle->tid = 0;
			mono_w32handle_set_signal_state (handle, TRUE, FALSE);
		}
	}

	mono_w32handle_unlock_handle (handle);

	return ret;
}
/*
 * async_invoke_thread:
 * @data: the ThreadPool this worker belongs to (cast from gpointer); the
 * variable is reused afterwards to carry the current work item.
 *
 * Body of a threadpool worker thread (both the regular and the IO pool).
 * Registers the thread (work-stealing queue, profiler, name, start hook),
 * then loops: execute the current item, try to fetch the next one (local
 * WSQ pop, then dequeue/steal, then a bounded nap-and-retry wait), and
 * exit when there is no work and the thread count may shrink below
 * tp->min_threads or the runtime is shutting down.
 */
static void
async_invoke_thread (gpointer data)
{
	MonoDomain *domain;
	MonoInternalThread *thread;
	MonoWSQ *wsq;
	ThreadPool *tp;
	gboolean must_die;
	const gchar *name;

	tp = data;
	/* Only regular workers get a local work-stealing queue; IO workers
	 * always go through the shared queue. */
	wsq = NULL;
	if (!tp->is_io)
		wsq = add_wsq ();

	thread = mono_thread_internal_current ();

	mono_profiler_thread_start (thread->tid);
	name = (tp->is_io) ? "IO Threadpool worker" : "Threadpool worker";
	mono_thread_set_name_internal (thread, mono_string_new (mono_domain_get (), name), FALSE);

	if (tp_start_func)
		tp_start_func (tp_hooks_user_data);

	/* From here on, `data` holds the current work item (NULL = none). */
	data = NULL;
	for (;;) {
		MonoAsyncResult *ar;
		MonoClass *klass;
		gboolean is_io_task;
		gboolean is_socket;
		int n_naps = 0;

		is_io_task = FALSE;
		ar = (MonoAsyncResult *) data;
		if (ar) {
			InterlockedIncrement (&tp->busy_threads);
			domain = ((MonoObject *)ar)->vtable->domain;
#ifndef DISABLE_SOCKETS
			klass = ((MonoObject *) data)->vtable->klass;
			/* Items that are not plain corlib AsyncResults are IO tasks
			 * (e.g. socket operations) wrapping the real AsyncResult. */
			is_io_task = !is_corlib_asyncresult (domain, klass);
			is_socket = FALSE;
			if (is_io_task) {
				MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;
				is_socket = is_socketasyncresult (domain, klass);
				ar = state->ares;
				/* Perform the actual socket IO here, before invoking the
				 * user continuation below. */
				switch (state->operation) {
				case AIO_OP_RECEIVE:
					state->total = ICALL_RECV (state);
					break;
				case AIO_OP_SEND:
					state->total = ICALL_SEND (state);
					break;
				}
			}
#endif
			/* worker threads invoke methods in different domains,
			 * so we need to set the right domain here */
			g_assert (domain);

			if (mono_domain_is_unloading (domain) || mono_runtime_is_shutting_down ()) {
				/* Domain going away: drop the job without running it. */
				threadpool_jobs_dec ((MonoObject *)ar);
				data = NULL;
				ar = NULL;
				InterlockedDecrement (&tp->busy_threads);
			} else {
				mono_thread_push_appdomain_ref (domain);
				if (threadpool_jobs_dec ((MonoObject *)ar)) {
					/* Job count says skip this one; undo bookkeeping. */
					data = NULL;
					ar = NULL;
					mono_thread_pop_appdomain_ref ();
					InterlockedDecrement (&tp->busy_threads);
					continue;
				}

				if (mono_domain_set (domain, FALSE)) {
					MonoObject *exc;

					if (tp_item_begin_func)
						tp_item_begin_func (tp_item_user_data);
					/* Feed queue-latency stats into the thread-spawn
					 * heuristics (regular pool only). */
					if (!is_io_task && ar->add_time > 0)
						process_idle_times (tp, ar->add_time);
					exc = mono_async_invoke (tp, ar);
					if (tp_item_end_func)
						tp_item_end_func (tp_item_user_data);
					if (exc)
						mono_internal_thread_unhandled_exception (exc);
					if (is_socket && tp->is_io) {
						MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data;

						/* Completed socket op with a user callback: queue
						 * the callback as a follow-up job. */
						if (state->completed && state->callback) {
							MonoAsyncResult *cb_ares;
							cb_ares = create_simple_asyncresult ((MonoObject *) state->callback,
												(MonoObject *) state);
							icall_append_job ((MonoObject *) cb_ares);
						}
					}
					mono_domain_set (mono_get_root_domain (), TRUE);
				}
				mono_thread_pop_appdomain_ref ();
				InterlockedDecrement (&tp->busy_threads);
				/* If the callee changes the background status, set it back to TRUE */
				mono_thread_clr_state (thread , ~ThreadState_Background);
				if (!mono_thread_test_state (thread , ThreadState_Background))
					ves_icall_System_Threading_Thread_SetState (thread, ThreadState_Background);
			}
		}

		/* Fetch the next work item: prefer the local WSQ, then the shared
		 * queue / stealing from other workers. */
		ar = NULL;
		data = NULL;
		must_die = should_i_die (tp);
		if (!must_die && (tp->is_io || !mono_wsq_local_pop (&data)))
			dequeue_or_steal (tp, &data, wsq);

		/* No work found: nap on the new-job semaphore up to 4 times before
		 * considering retiring this thread. */
		n_naps = 0;
		while (!must_die && !data && n_naps < 4) {
			gboolean res;

			InterlockedIncrement (&tp->waiting);

			// Another thread may have added a job into its wsq since the last call to dequeue_or_steal
			// Check all the queues again before entering the wait loop
			dequeue_or_steal (tp, &data, wsq);
			if (data) {
				InterlockedDecrement (&tp->waiting);
				break;
			}

			/* Let the GC run without suspending us while we block. */
			mono_gc_set_skip_thread (TRUE);

#if defined(__OpenBSD__)
			while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_wait (&tp->new_job, TRUE)) == -1) {// && errno == EINTR) {
#else
			while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_timedwait (&tp->new_job, 2000, TRUE)) == -1) {// && errno == EINTR) {
#endif
				if (mono_runtime_is_shutting_down ())
					break;
				if (THREAD_WANTS_A_BREAK (thread))
					mono_thread_interruption_checkpoint ();
			}
			InterlockedDecrement (&tp->waiting);

			mono_gc_set_skip_thread (FALSE);

			if (mono_runtime_is_shutting_down ())
				break;
			must_die = should_i_die (tp);
			dequeue_or_steal (tp, &data, wsq);
			n_naps++;
		}

		/* Last chance for a regular worker: pop a local item even after the
		 * naps; if we are supposed to die, hand the death sentence to some
		 * other thread and run this item ourselves. */
		if (!data && !tp->is_io && !mono_runtime_is_shutting_down ()) {
			mono_wsq_local_pop (&data);
			if (data && must_die) {
				InterlockedCompareExchange (&tp->destroy_thread, 1, 0);
				pulse_on_new_job (tp);
			}
		}

		if (!data) {
			gint nt;
			gboolean down;
			/* Retire this thread if shutting down or if the pool can shrink
			 * without going below min_threads; the CAS on nthreads ensures
			 * only one thread claims each slot. */
			while (1) {
				nt = tp->nthreads;
				down = mono_runtime_is_shutting_down ();
				if (!down && nt <= tp->min_threads)
					break;
				if (down || InterlockedCompareExchange (&tp->nthreads, nt - 1, nt) == nt) {
					mono_perfcounter_update_value (tp->pc_nthreads, TRUE, -1);
					if (!tp->is_io) {
						remove_wsq (wsq);
					}

					mono_profiler_thread_end (thread->tid);

					if (tp_finish_func)
						tp_finish_func (tp_hooks_user_data);
					return;
				}
			}
		}
	}

	g_assert_not_reached ();
}

/*
 * ves_icall_System_Threading_ThreadPool_GetAvailableThreads:
 * Icall backing ThreadPool.GetAvailableThreads: reports max minus busy for
 * the regular and the IO pool. Reads the counters without locking, so the
 * values are a best-effort snapshot.
 */
void
ves_icall_System_Threading_ThreadPool_GetAvailableThreads (gint *workerThreads, gint *completionPortThreads)
{
	*workerThreads = async_tp.max_threads - async_tp.busy_threads;
	*completionPortThreads = async_io_tp.max_threads - async_io_tp.busy_threads;
}
/*
 * mono_w32mutex_abandon:
 *
 * Abandons every mutex still owned by the current (attached) thread:
 * resets recursion and owner, marks the mutex abandoned, signals the
 * handle so waiters wake (and will observe the abandoned flag), and
 * removes it from the thread's owned-mutex list. Frees the list when done.
 * Typically called during thread teardown — TODO confirm against callers.
 */
void
mono_w32mutex_abandon (void)
{
	MonoInternalThread *internal;

	g_assert (mono_thread_internal_current_is_attached ());

	internal = mono_thread_internal_current ();
	g_assert (internal);

	if (!internal->owned_mutexes)
		return;

	/* thread_disown_mutex removes the entry, so always take index 0. */
	while (internal->owned_mutexes->len) {
		MonoW32HandleType type;
		MonoW32HandleMutex *mutex_handle;
		MonoNativeThreadId tid;
		gpointer handle;

		handle = g_ptr_array_index (internal->owned_mutexes, 0);

		switch (type = mono_w32handle_get_type (handle)) {
		case MONO_W32HANDLE_MUTEX:
		case MONO_W32HANDLE_NAMEDMUTEX:
			break;
		default:
			/* Only mutex handles can appear in owned_mutexes. */
			g_assert_not_reached ();
		}

		if (!mono_w32handle_lookup (handle, type, (gpointer *)&mutex_handle)) {
			g_error ("%s: error looking up %s handle %p",
				__func__, mono_w32handle_get_typename (type), handle);
		}

		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: abandoning %s handle %p",
			__func__, mono_w32handle_get_typename (type), handle);

		tid = MONO_UINT_TO_NATIVE_THREAD_ID (internal->tid);

		/* Sanity check: a mutex in this thread's owned list must be owned
		 * by this thread; anything else is fatal. (Checked before taking
		 * the handle lock — the ownership invariant makes this safe.) */
		if (!pthread_equal (mutex_handle->tid, tid))
			g_error ("%s: trying to release mutex %p acquired by thread %p from thread %p",
				__func__, handle, (gpointer) mutex_handle->tid, (gpointer) tid);

		mono_w32handle_lock_handle (handle);

		mutex_handle->recursion = 0;
		mutex_handle->tid = 0;
		/* Flag abandonment so the next owner gets WAIT_ABANDONED semantics. */
		mutex_handle->abandoned = TRUE;

		mono_w32handle_set_signal_state (handle, TRUE, FALSE);

		thread_disown_mutex (internal, handle);

		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: abandoned %s handle %p",
			__func__, mono_w32handle_get_typename (type), handle);

		mono_w32handle_unlock_handle (handle);
	}

	g_ptr_array_free (internal->owned_mutexes, TRUE);
	internal->owned_mutexes = NULL;
}