/* Create a global reference to the object designated by REF.
   Global references are tracked in Vmodule_refs_hash, keyed by the
   Lisp object with a fixnum refcount as the value, so that the
   garbage collector keeps the object alive while references remain.
   Signals overflow-error if the refcount would exceed
   MOST_POSITIVE_FIXNUM.  Returns an emacs_value for the same
   object.  */
static emacs_value
module_make_global_ref (emacs_env *env, emacs_value ref)
{
  MODULE_FUNCTION_BEGIN (module_nil);
  struct Lisp_Hash_Table *refs = XHASH_TABLE (Vmodule_refs_hash);
  Lisp_Object obj = value_to_lisp (ref);
  EMACS_UINT code;
  ptrdiff_t slot = hash_lookup (refs, obj, &code);

  if (slot < 0)
    {
      /* First global reference to this object: record a refcount
         of 1, reusing the hash code computed by the lookup.  */
      hash_put (refs, obj, make_natnum (1), code);
    }
  else
    {
      /* Object already referenced: bump the stored refcount,
         guarding against fixnum overflow.  */
      EMACS_INT count = XFASTINT (HASH_VALUE (refs, slot)) + 1;
      if (MOST_POSITIVE_FIXNUM < count)
        xsignal0 (Qoverflow_error);
      set_hash_value_slot (refs, slot, make_natnum (count));
    }

  return lisp_to_value (obj);
}
/* Release one global reference to the object designated by REF.
   Decrements the refcount stored in Vmodule_refs_hash; when it
   reaches zero, removes the entry so the object becomes eligible for
   garbage collection again.  A REF that was never registered is
   silently ignored (see FIXME below).  */
static void
module_free_global_ref (emacs_env *env, emacs_value ref)
{
  /* TODO: This probably never signals.  */
  /* FIXME: Wait a minute.  Shouldn't this function report an error if
     the hash lookup fails?  */
  MODULE_FUNCTION_BEGIN ();
  struct Lisp_Hash_Table *h = XHASH_TABLE (Vmodule_refs_hash);
  Lisp_Object obj = value_to_lisp (ref);
  EMACS_UINT hashcode;
  ptrdiff_t i = hash_lookup (h, obj, &hashcode);

  if (i >= 0)
    {
      Lisp_Object value = HASH_VALUE (h, i);
      EMACS_INT refcount = XFASTINT (value) - 1;
      if (refcount > 0)
        {
          value = make_natnum (refcount);
          set_hash_value_slot (h, i, value);
        }
      else
        /* Last reference dropped: remove the entry.  The table is
           keyed by the referenced object OBJ, not by the refcount
           fixnum VALUE — passing VALUE here meant the entry was never
           found and the object stayed pinned against GC forever.  */
        hash_remove_from_table (h, obj);
    }
}
/* Record the current backtrace in LOG, accumulating COUNT samples
   for it.  Runs in a signal handler, so it must not allocate memory;
   the hash table LOG is pre-populated with "working memory" key
   vectors so that hash_put below never needs to allocate.  */
static void
record_backtrace (log_t *log, EMACS_INT count)
{
  Lisp_Object backtrace;
  ptrdiff_t index;

  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector.  */
  backtrace = HASH_KEY (log, index);
  get_backtrace (backtrace);

  { /* We basically do a `gethash+puthash' here, except that we have
       to be careful to avoid memory allocation since we're in a
       signal handler, and we optimize the code to try and avoid
       computing the hash+lookup twice.  See fns.c:Fputhash for
       reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        EMACS_INT old_val = XINT (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is nil.  */
        eassert (!NILP (log->next_free));
        /* Reuse the outer ptrdiff_t J rather than shadowing it with a
           narrower `int' — hash_put returns a ptrdiff_t index, and
           truncating it could defeat the eassert below.  */
        j = hash_put (log, backtrace, make_number (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in QUIT, such that QUIT can then call
             Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag check in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.
           That gives us more flexibility since that Elisp code can then
           do all kinds of fun stuff like write the log to disk.  Or
           turn it right away into a call tree.
           Of course, using Elisp is generally preferable, but it may
           take longer until we get a chance to run the Elisp code, so
           there's more risk that the table will get full before we
           get there.  */
      }
  }
}