/* Tear down every foundation subsystem.
 *
 * The flag is cleared first so any concurrent observer sees the library
 * as uninitialized before services start disappearing.  The finalizers
 * then run in a fixed sequence: higher-level services (profiling,
 * config, filesystem, streams) are shut down before the low-level
 * primitives they depend on (logging, memory, atomics), which go last.
 * Do not reorder these calls. */
void
foundation_finalize(void)
{
	_foundation_initialized = false;

	profile_finalize();
	_config_finalize();
	_fs_finalize();
	_stream_finalize();
	_system_finalize();
	_library_finalize();
	_environment_finalize();
	_random_finalize();
	_thread_finalize();
	_time_finalize();
	_log_finalize();
	_stacktrace_finalize();
	_static_hash_finalize();
	_memory_finalize();
	_atomic_finalize();
}
/*
 * Call all handlers registered with __cxa_atexit() for the shared
 * object owning 'dso'.
 * Note: if 'dso' is NULL, then all remaining handlers are called
 * (the exit() path), and thread-local cleanup runs first.
 */
void
__cxa_finalize(void *dso)
{
	struct atexit *p, *q;
	struct atexit_fn fn;
	int n, pgsize = getpagesize();
	static int call_depth;	/* guards against recursive exit() */

	if (dso == NULL)
		_thread_finalize();

	_ATEXIT_LOCK();
	call_depth++;

restart:
	/* restartloop is set (elsewhere) when a running handler registers
	 * a new atexit handler, invalidating our list walk. */
	restartloop = 0;
	for (p = __atexit; p != NULL; p = p->next) {
		/* Slots are filled in order, so walk them newest-first
		 * (atexit handlers run in reverse registration order). */
		for (n = p->ind; --n >= 0;) {
			if (p->fns[n].fn_ptr == NULL)
				continue;	/* already called */
			if (dso != NULL && dso != p->fns[n].fn_dso)
				continue;	/* wrong DSO */
			/*
			 * Mark handler as having been already called to avoid
			 * dupes and loops, then call the appropriate function.
			 * Copy the entry first: once fn_ptr is NULLed the slot
			 * is dead, and the pages are normally kept PROT_READ
			 * to harden the list against memory-corruption attacks.
			 * NOTE(review): assumes each atexit page is exactly one
			 * page (pgsize) long — confirm against the allocator.
			 */
			fn = p->fns[n];
			if (mprotect(p, pgsize, PROT_READ | PROT_WRITE) == 0) {
				p->fns[n].fn_ptr = NULL;
				mprotect(p, pgsize, PROT_READ);
			}
			/* Drop the lock around the callback: the handler may
			 * itself call atexit()/__cxa_atexit() or exit(). */
			_ATEXIT_UNLOCK();
			(*fn.fn_ptr)(fn.fn_arg);
			_ATEXIT_LOCK();
			if (restartloop)
				goto restart;	/* list changed under us */
		}
	}
	call_depth--;
	/*
	 * If called via exit(), unmap the pages since we have now run
	 * all the handlers.  We defer this until calldepth == 0 so that
	 * we don't unmap things prematurely if called recursively.
	 */
	if (dso == NULL && call_depth == 0) {
		for (p = __atexit; p != NULL; ) {
			q = p;
			p = p->next;	/* grab next before unmapping q */
			munmap(q, pgsize);
		}
		__atexit = NULL;
	}
	_ATEXIT_UNLOCK();
	/*
	 * If unloading a DSO, unregister any atfork handlers registered
	 * by it.  Skip the locking if the list is currently empty.
	 */
	if (dso != NULL && TAILQ_FIRST(&_atfork_list) != NULL) {
		struct atfork_fn *af, *afnext;

		_ATFORK_LOCK();
		/* _SAFE variant: we free entries while iterating. */
		TAILQ_FOREACH_SAFE(af, &_atfork_list, fn_next, afnext)
			if (af->fn_dso == dso) {
				TAILQ_REMOVE(&_atfork_list, af, fn_next);
				free(af);
			}
		_ATFORK_UNLOCK();
	}
}