static void reset_finalizer_tree(GCTYPE *gc)
/* After a GC, move gen0 finalizers to the old finalizer list. Note
   that the old gen0 splay tree is otherwise broken, since object
   addresses have moved. */
{
  Fnl *fnl, *next;

  fnl = gc->gen0_finalizers;
  gc->gen0_finalizers = NULL;
  gc->splayed_gen0_finalizers = NULL;

  for (; fnl; fnl = next) {
    next = fnl->next;
    /* Checking both `fnl` and `fnl->p` is redundant, since
       `fnl` is always allocated after `fnl->p`, but check
       both just in case the order of allocation somehow
       changes in the future. */
    if (is_in_generation_half(gc, fnl)
        || is_in_generation_half(gc, fnl->f)
        || is_in_generation_half(gc, fnl->p)
        || is_in_generation_half(gc, fnl->data))
      add_finalizer(fnl, 1, gc);
    else
      add_finalizer(fnl, 0, gc);
  }
}
void FinalizerThread::extension_finalizer(STATE, Object* obj,
                                          FinalizerFunction func)
{
  if(finishing_) return;

  UnmanagedPhase unmanaged(state);
  std::lock_guard<std::mutex> guard(list_mutex());

  add_finalizer(state, new ExtensionFinalizer(state, obj, func));
}
static void merge_finalizer_trees(GCTYPE *gc)
/* For a full GC, move all finalizers to the gen0 list */
{
  Fnl *fnl, *next;

  for (fnl = gc->finalizers; fnl; fnl = next) {
    next = fnl->next;
    add_finalizer(fnl, 1, gc);
  }
  gc->finalizers = NULL;
  gc->splayed_finalizers = NULL;
}
static void reset_finalizer_tree(GCTYPE *gc)
/* After a GC, move gen0 finalizers to the old finalizer list. Note
   that the old gen0 splay tree is otherwise broken, since object
   addresses have moved. */
{
  Fnl *fnl, *next;

  fnl = gc->gen0_finalizers;
  gc->gen0_finalizers = NULL;
  gc->splayed_gen0_finalizers = NULL;

  for (; fnl; fnl = next) {
    next = fnl->next;
    if (is_in_gen_half(fnl, gc)
        || is_in_gen_half(fnl->f, gc)
        || is_in_gen_half(fnl->data, gc))
      add_finalizer(fnl, 1, gc);
    else
      add_finalizer(fnl, 0, gc);
  }
}
void FinalizerThread::managed_finalizer(STATE, Object* obj, Object* finalizer) {
  if(finishing_) return;

  /* This method will be called by a managed thread during a managed
   * phase. We acquire this list mutex *while managed* to prevent
   * garbage collection from running, since that is the only place that
   * the inversion of managed phase and locking this mutex can occur.
   *
   * Since Ruby allows any number of finalizers on a single object as
   * long as the finalizer "callable" is different, we have to do a
   * complex comparison to determine if the "callable" is different.
   * This must be done during a managed phase even if it were not a
   * method send because it works with managed objects.
   */
  std::lock_guard<std::mutex> guard(list_mutex());

  for(FinalizerObjects::iterator i = live_list_.begin();
      i != live_list_.end();
      /* advance is handled in the loop */)
  {
    FinalizerObject* fo = *i;

    if(fo->match_p(state, obj, finalizer)) {
      if(finalizer->nil_p()) {
        i = live_list_.erase(i);
        continue;
      } else {
        return;
      }
    }

    ++i;
  }

  if(finalizer->nil_p()) return;

  /* Rubinius specific API. If the finalizer is the object, we're going to
   * send the object __finalize__. We mark that the user wants this by
   * putting cTrue as the ruby_finalizer.
   */
  add_finalizer(state, new ManagedFinalizer(state, obj,
        obj == finalizer ? cTrue : finalizer));
}
void ev_add(at *handler, at *event, const char *desc, int mods)
{
  if (handler && event)
    {
      at *p;
      at *d = NIL;
      if (mods == (unsigned char)mods)
        d = NEW_NUMBER(mods);
      if (desc && d)
        d = cons(new_gptr((gptr)desc), d);
      else if (desc)
        d = new_gptr((gptr)desc);
      LOCK(event);
      p = cons(new_gptr(handler), cons(d, event));
      add_finalizer(handler, ev_finalize, 0);
      tail->Cdr = cons(p, NIL);
      tail = tail->Cdr;
    }
}
static void * timer_add_sub(at *handler, int sec, int msec, int period)
{
  struct event_timer *ti;
  if (handler)
    {
      add_finalizer(handler, ti_finalize, 0);
      if (! (ti = malloc(sizeof(struct event_timer))))
        error(NIL, "Out of memory", NIL);
      ti->date.sec = sec;
      ti->date.msec = msec;
      ti->period.sec = period/1000;
      ti->period.msec = period%1000;
      ti->handler = handler;
      ti->next = 0;
      ti_insert(ti);
      return ti;
    }
  return 0;
}
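/* Editorial sketch, not part of the original sources: in both ev_add and
 * timer_add_sub above, add_finalizer(handler, ..., 0) is registered before
 * the handler is linked into the event or timer list, so a callback can
 * unlink the entry once the handler object is reclaimed. Lush's real
 * ev_finalize/ti_finalize are not shown here; the function below only
 * illustrates the shape such a callback could take. The `timers` list head,
 * the callback signature, and the unlink logic are all assumptions. */
static void ti_finalize_sketch(at *handler, void *arg)
{
  struct event_timer **pt = &timers;       /* assumed global list head */
  (void)arg;
  while (*pt) {
    if ((*pt)->handler == handler) {
      struct event_timer *dead = *pt;
      *pt = dead->next;                    /* unlink, then release the entry */
      free(dead);
    } else {
      pt = &(*pt)->next;
    }
  }
}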
void GC_set_finalizer(void *p, int tagged, int level,
                      void (*f)(void *p, void *data),
                      void *data,
                      void (**oldf)(void *p, void *data),
                      void **olddata)
{
  GCTYPE *gc = GC_get_GC();
  Fnl *fnl;

  if (!is_finalizable_page(gc, p)) {
    /* Never collected. Don't finalize it. */
    if (oldf) *oldf = NULL;
    if (olddata) *olddata = NULL;
    return;
  }

  gc->splayed_gen0_finalizers = fnl_splay((intptr_t)p, gc->splayed_gen0_finalizers);
  fnl = gc->splayed_gen0_finalizers;
  if (!fnl || (fnl->p != p)) {
    gc->splayed_finalizers = fnl_splay((intptr_t)p, gc->splayed_finalizers);
    fnl = gc->splayed_finalizers;
    if (!fnl || (fnl->p != p))
      fnl = NULL;
    else {
      /* since we're mutating this finalizer, move it to the gen0 list and tree */
      remove_finalizer(fnl, 0, gc);
      add_finalizer(fnl, 1, gc);
    }
  }

  if (fnl && (fnl->p == p)) {
    if (oldf) *oldf = fnl->f;
    if (olddata) *olddata = fnl->data;
    if (f) {
      fnl->f = f;
      fnl->data = data;
      fnl->eager_level = level;
    } else {
      /* remove finalizer */
      remove_finalizer(fnl, 1, gc);
      --gc->num_fnls;
    }
    return;
  }

  if (oldf) *oldf = NULL;
  if (olddata) *olddata = NULL;

  if (!f)
    return;

  /* Allocation might trigger GC, so we use park: */
  CHECK_PARK_UNUSED(gc);
  gc->park[0] = p;
  gc->park[1] = data;

  fnl = (Fnl *)GC_malloc_atomic(sizeof(Fnl));
  memset(fnl, 0, sizeof(Fnl));

  p = gc->park[0];
  data = gc->park[1];
  gc->park[0] = NULL;
  gc->park[1] = NULL;

  fnl->p = p;
  fnl->f = f;
  fnl->data = data;
  fnl->eager_level = level;
  fnl->tagged = tagged;

#if CHECKS
  {
    MPage *m;
    m = find_page(p);
    if (tagged) {
      /* complain only when the page is neither tagged nor a pair */
      if ((m->type != MTYPE_TAGGED)
          && (m->type != MTYPE_PAIR)) {
        GCPRINT(GCOUTF, "Not tagged: %lx (%d)\n", (intptr_t)p, m->type);
        CRASH(4);
      }
    }
  }
#endif

  add_finalizer(fnl, 1, gc);
  gc->num_fnls++;
}
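/* Editorial sketch, not part of the original sources: how a client might use
 * GC_set_finalizer above. Passing a function installs (or replaces) the
 * finalizer and reports any previous one through oldf/olddata; passing
 * f == NULL removes an existing finalizer. The names close_port and
 * install_port_finalizer and the eager level 3 are hypothetical choices
 * made only for illustration. */
static void close_port(void *p, void *data)
{
  /* release whatever external resource `p` holds; `data` was supplied
     at registration time */
}

static void install_port_finalizer(void *obj, void *port_state)
{
  void (*old_f)(void *p, void *data);
  void *old_data;

  GC_set_finalizer(obj, 1, 3, close_port, port_state, &old_f, &old_data);

  /* Later, to drop the finalizer without running it:
     GC_set_finalizer(obj, 1, 3, NULL, NULL, NULL, NULL); */
}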
void scheme_register_finalizer(void *p, void (*f)(void *p, void *data),
                               void *data, void (**oldf)(void *p, void *data),
                               void **olddata)
{
  add_finalizer(p, f, data, 0, 1, oldf, olddata, 0, 0);
}

void scheme_add_scheme_finalizer_once(void *p, void (*f)(void *p, void *data),
                                      void *data)
{
  add_finalizer(p, f, data, 0, 0, NULL, NULL, 1, 0);
}

void scheme_subtract_finalizer(void *p, void (*f)(void *p, void *data),
                               void *data)
{
  add_finalizer(p, f, data, 1, 0, NULL, NULL, 1, 1);
}
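/* Editorial note, not part of the original sources: the three wrappers above
 * share one internal add_finalizer and differ only in the trailing flag
 * arguments they forward; judging by the names, the "subtract" variant
 * un-registers a callback that an earlier call added. A plain cleanup
 * registration would go through scheme_register_finalizer, roughly as in
 * the hypothetical example below (free_attached_buffer and attach_buffer
 * are invented names): */
static void free_attached_buffer(void *p, void *data)
{
  free(data);   /* `data` is a hypothetical buffer tied to the object `p` */
}

static void attach_buffer(void *scheme_obj, void *buffer)
{
  void (*old_f)(void *p, void *data);
  void *old_data;

  scheme_register_finalizer(scheme_obj, free_attached_buffer, buffer,
                            &old_f, &old_data);
}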