/* Shut down the finalization thread if it is running, waiting for it
   to exit.  Serialized against start_finalization_thread by
   FINALIZATION_THREAD_LOCK.  */
static void
stop_finalization_thread (void)
{
  int was_running;

  scm_i_pthread_mutex_lock (&finalization_thread_lock);
  was_running = finalization_thread_is_running;
  if (was_running)
    {
      notify_about_to_fork ();
      /* NOTE(review): pthread_join returns the error number rather than
         setting errno, so perror may report a stale errno here — the
         message text is still useful as a diagnostic.  */
      if (pthread_join (finalization_thread, NULL) != 0)
        perror ("joining finalization thread");
      finalization_thread_is_running = 0;
    }
  scm_i_pthread_mutex_unlock (&finalization_thread_lock);
}
/* Load extension LIB:INIT.  If a matching entry was registered with
   scm_c_register_extension, invoke its callback; otherwise fall back
   to dynamically linking LIB and calling INIT (when modules are
   available).  */
static void
load_extension (SCM lib, SCM init)
{
  extension_t *registered;

  /* Snapshot the registry head under the lock; the list is only ever
     prepended to, so walking the snapshot without the lock is safe.  */
  scm_i_pthread_mutex_lock (&ext_lock);
  registered = registered_extensions;
  scm_i_pthread_mutex_unlock (&ext_lock);

  /* Search the registry. */
  if (registered != NULL)
    {
      extension_t *walk, *matched = NULL;
      char *c_lib, *c_init;

      scm_dynwind_begin (0);
      c_lib = scm_to_locale_string (lib);
      scm_dynwind_free (c_lib);
      c_init = scm_to_locale_string (init);
      scm_dynwind_free (c_init);

      for (walk = registered; walk != NULL; walk = walk->next)
        {
          /* A NULL lib in the registry matches any library name.  */
          int lib_ok = (walk->lib == NULL || strcmp (walk->lib, c_lib) == 0);

          if (lib_ok && strcmp (walk->init, c_init) == 0)
            {
              walk->func (walk->data);
              matched = walk;
              break;
            }
        }

      scm_dynwind_end ();

      if (matched != NULL)
        return;
    }

  /* Dynamically link the library. */
#if HAVE_MODULES
  scm_dynamic_call (init, scm_dynamic_link (lib));
#else
  scm_misc_error ("load-extension",
                  "extension ~S:~S not registered and dynamic-link disabled",
                  scm_list_2 (init, lib));
#endif
}
void scm_c_register_extension (const char *lib, const char *init, void (*func) (void *), void *data) { extension_t *ext = scm_malloc (sizeof(extension_t)); if (lib) ext->lib = scm_strdup (lib); else ext->lib = NULL; ext->init = scm_strdup (init); ext->func = func; ext->data = data; scm_i_pthread_mutex_lock (&ext_lock); ext->next = registered_extensions; registered_extensions = ext; scm_i_pthread_mutex_unlock (&ext_lock); }
/* Start the finalization thread if it is not already running.
   Serialized against stop_finalization_thread by
   FINALIZATION_THREAD_LOCK.  */
static void
start_finalization_thread (void)
{
  scm_i_pthread_mutex_lock (&finalization_thread_lock);
  if (!finalization_thread_is_running)
    {
      /* Use the raw pthread API and scm_with_guile, because we don't
         want to block on any lock that scm_spawn_thread might want to
         take, and we don't want to inherit the dynamic state (fluids)
         of the caller.  */
      int err = pthread_create (&finalization_thread, NULL,
                                run_finalization_thread, NULL);

      /* NOTE(review): pthread_create returns the error number rather
         than setting errno, so perror may report a stale errno — kept
         for message text.  */
      if (err != 0)
        perror ("error creating finalization thread");
      else
        finalization_thread_is_running = 1;
    }
  scm_i_pthread_mutex_unlock (&finalization_thread_lock);
}
/* Issue the deprecation warning MSG, printing it at most once per
   distinct message text.  If SCM_WARN_DEPRECATED is unset, only
   request a summary notice at exit via PRINT_SUMMARY.  */
void scm_c_issue_deprecation_warning (const char *msg)
{
  if (!SCM_WARN_DEPRECATED)
    print_summary = 1;
  else
    {
      struct issued_warning *iw;
      scm_i_pthread_mutex_lock (&warn_lock);
      /* If this exact message was already issued, clear MSG: it doubles
         as the "should we record and print" flag below.  */
      for (iw = issued_warnings; iw; iw = iw->prev)
        if (!strcmp (iw->message, msg))
          {
            msg = NULL;
            break;
          }
      if (msg)
        {
          /* Take a private copy of MSG before stashing it; the caller's
             string may not outlive this call.  */
          msg = strdup (msg);
          iw = malloc (sizeof (struct issued_warning));
          if (msg == NULL || iw == NULL)
            /* Nothing sensible to do if you can't allocate this small
               amount of memory.  */
            abort ();
          iw->message = msg;
          iw->prev = issued_warnings;
          issued_warnings = iw;
        }
      scm_i_pthread_mutex_unlock (&warn_lock);
      /* All this dance is to avoid printing to a port inside a mutex,
         which could recurse and deadlock.  */
      if (msg)
        {
          /* During GC the port machinery is off-limits; fall back to
             raw stderr.  */
          if (scm_gc_running_p)
            fprintf (stderr, "%s\n", msg);
          else
            {
              scm_puts_unlocked (msg, scm_current_warning_port ());
              scm_newline (scm_current_warning_port ());
            }
        }
    }
}
/* Create and return a new port of type PORT_TYPE with the mode bits
   MODE_BITS OR'ed into its cell type.  */
static SCM
ioscm_open_port (scm_t_bits port_type, long mode_bits)
{
  SCM port;

  /* NOTE(review): ideally the port-table mutex would be held across
     entry creation and type setting, but Guile does not export it;
     the disabled code below documents the intent.  */
#if 0 /* TODO: Guile doesn't export this.  What to do? */
  scm_i_scm_pthread_mutex_lock (&scm_i_port_table_mutex);
#endif

  port = scm_new_port_table_entry (port_type);
  SCM_SET_CELL_TYPE (port, port_type | mode_bits);

#if 0 /* TODO: Guile doesn't export this.  What to do? */
  scm_i_pthread_mutex_unlock (&scm_i_port_table_mutex);
#endif

  return port;
}
/* Resize TABLE to the size indicated by compute_size_index, rehashing
   every live entry into a freshly allocated entry array using robin
   hood displacement.  Called with TABLE->lock held; the lock is
   temporarily dropped around allocation.  */
static void
resize_table (scm_t_weak_table *table)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do
    {
      new_size_index = compute_size_index (table);
      if (new_size_index == table->size_index)
        return;
      new_size = hashtable_size[new_size_index];
      scm_i_pthread_mutex_unlock (&table->lock);
      /* Allocating memory might cause finalizers to run, which could
         run anything, so drop our lock to avoid deadlocks.  */
      new_entries = allocate_entries (new_size, table->kind);
      /* BUGFIX: re-acquire the lock after the unlocked allocation.
         The previous code called unlock here a second time, leaving
         the table unprotected for the rest of the resize (compare
         resize_set, which correctly re-locks).  */
      scm_i_pthread_mutex_lock (&table->lock);
    }
  while (!is_acceptable_size_index (table, new_size_index));

  old_entries = table->entries;
  old_size = table->size;

  table->size_index = new_size_index;
  table->size = new_size;
  if (new_size_index <= table->min_size_index)
    table->lower = 0;
  else
    table->lower = new_size / 5;
  table->upper = 9 * new_size / 10;
  table->n_items = 0;
  table->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      /* BUGFIX: the argument here had been mangled to the literal
         "©" (mojibake for "&copy") — pass the address of the local
         COPY, as resize_set does.  */
      copy_weak_entry (&old_entries[old_k], &copy);

      /* Skip entries whose weak key or value was already collected.  */
      if (!copy.key || !copy.value)
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (table, new_k);
              break;
            }
        }

      table->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;
      new_entries[new_k].value = copy.value;

      register_disappearing_links (&new_entries[new_k],
                                   SCM_PACK (copy.key),
                                   SCM_PACK (copy.value),
                                   table->kind);
    }
}
/* Resize SET to the size indicated by compute_size_index, rehashing
   every live entry into a freshly allocated, zeroed entry array using
   robin hood displacement.  Called with SET->lock held; the lock is
   temporarily dropped around allocation.  */
static void
resize_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do
    {
      new_size_index = compute_size_index (set);
      if (new_size_index == set->size_index)
        return;
      new_size = hashset_size[new_size_index];
      scm_i_pthread_mutex_unlock (&set->lock);
      /* Allocating memory might cause finalizers to run, which could
         run anything, so drop our lock to avoid deadlocks.  */
      new_entries = scm_gc_malloc_pointerless (new_size * sizeof (scm_t_weak_entry),
                                               "weak set");
      scm_i_pthread_mutex_lock (&set->lock);
    }
  while (!is_acceptable_size_index (set, new_size_index));

  old_entries = set->entries;
  old_size = set->size;

  memset (new_entries, 0, new_size * sizeof (scm_t_weak_entry));

  set->size_index = new_size_index;
  set->size = new_size;
  if (new_size_index <= set->min_size_index)
    set->lower = 0;
  else
    set->lower = new_size / 5;
  set->upper = 9 * new_size / 10;
  set->n_items = 0;
  set->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      /* BUGFIX: the argument here had been mangled to the literal
         "©" (mojibake for "&copy") — pass the address of the local
         COPY.  */
      copy_weak_entry (&old_entries[old_k], &copy);

      /* Skip entries whose weak key was already collected.  */
      if (!copy.key)
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (set, new_k);
              break;
            }
        }

      set->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;

      if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
        SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &new_entries[new_k].key,
                                          (GC_PTR) new_entries[new_k].key);
    }
}