/*
 * Register a set of fork handlers (prepare/parent/child) on behalf of
 * the shared object 'dso', so they can be unregistered when that DSO
 * is unloaded.  Returns 0 on success or ENOMEM if the record cannot
 * be allocated.
 */
int
_thread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void), void *dso)
{
	struct atfork_fn *handler;

	handler = malloc(sizeof(*handler));
	if (handler == NULL)
		return (ENOMEM);

	handler->fn_prepare = prepare;
	handler->fn_parent = parent;
	handler->fn_child = child;
	handler->fn_dso = dso;

	/* Append under the atfork lock so concurrent forks see a
	 * consistent list. */
	_ATFORK_LOCK();
	TAILQ_INSERT_TAIL(&_atfork_list, handler, fn_next);
	_ATFORK_UNLOCK();

	return (0);
}
/*
 * Call all handlers registered with __cxa_atexit() for the shared
 * object owning 'dso'.
 * Note: if 'dso' is NULL, then all remaining handlers are called.
 */
void
__cxa_finalize(void *dso)
{
	struct atexit *p, *q;
	struct atexit_fn fn;
	int n, pgsize = getpagesize();
	/* Recursion depth: nonzero while a handler invoked below is
	 * itself calling __cxa_finalize().  Shared across calls. */
	static int call_depth;

	if (dso == NULL)
		_thread_finalize();

	_ATEXIT_LOCK();
	call_depth++;

restart:
	/*
	 * restartloop is declared elsewhere in this file; presumably it
	 * is set when the handler list is modified (e.g. by __cxa_atexit
	 * called from inside a handler) while we run unlocked below —
	 * TODO confirm against the rest of the file.
	 */
	restartloop = 0;
	/* Walk every page of handler slots, newest slot first within
	 * each page (handlers run in reverse registration order). */
	for (p = __atexit; p != NULL; p = p->next) {
		for (n = p->ind; --n >= 0;) {
			if (p->fns[n].fn_ptr == NULL)
				continue;	/* already called */
			if (dso != NULL && dso != p->fns[n].fn_dso)
				continue;	/* wrong DSO */

			/*
			 * Mark handler as having been already called to avoid
			 * dupes and loops, then call the appropriate function.
			 * Copy the slot into 'fn' first: the slot is NULLed
			 * before the call, and the handler pages are kept
			 * PROT_READ except during this brief update.
			 */
			fn = p->fns[n];
			if (mprotect(p, pgsize, PROT_READ | PROT_WRITE) == 0) {
				p->fns[n].fn_ptr = NULL;
				mprotect(p, pgsize, PROT_READ);
			}
			/*
			 * Drop the lock while invoking the handler so it may
			 * safely register further handlers; re-check
			 * restartloop afterwards in case the list changed
			 * under us.
			 */
			_ATEXIT_UNLOCK();
			(*fn.fn_ptr)(fn.fn_arg);
			_ATEXIT_LOCK();
			if (restartloop)
				goto restart;
		}
	}

	call_depth--;

	/*
	 * If called via exit(), unmap the pages since we have now run
	 * all the handlers. We defer this until calldepth == 0 so that
	 * we don't unmap things prematurely if called recursively.
	 */
	if (dso == NULL && call_depth == 0) {
		for (p = __atexit; p != NULL; ) {
			q = p;
			p = p->next;	/* advance before the page vanishes */
			munmap(q, pgsize);
		}
		__atexit = NULL;
	}
	_ATEXIT_UNLOCK();

	/*
	 * If unloading a DSO, unregister any atfork handlers registered
	 * by it. Skip the locking if the list is currently empty.
	 */
	if (dso != NULL && TAILQ_FIRST(&_atfork_list) != NULL) {
		struct atfork_fn *af, *afnext;

		_ATFORK_LOCK();
		/* _SAFE variant: 'af' is freed inside the loop body. */
		TAILQ_FOREACH_SAFE(af, &_atfork_list, fn_next, afnext)
			if (af->fn_dso == dso) {
				TAILQ_REMOVE(&_atfork_list, af, fn_next);
				free(af);
			}
		_ATFORK_UNLOCK();
	}
}