static void
ensure_toggleref_capacity (int capacity)
{
	if (!toggleref_array) {
		toggleref_array_capacity = 32;
		toggleref_array = sgen_alloc_internal_dynamic (
			toggleref_array_capacity * sizeof (MonoGCToggleRef),
			INTERNAL_MEM_TOGGLEREF_DATA,
			TRUE);
	}
	if (toggleref_array_size + capacity >= toggleref_array_capacity) {
		MonoGCToggleRef *tmp;
		int old_capacity = toggleref_array_capacity;
		while (toggleref_array_capacity < toggleref_array_size + capacity)
			toggleref_array_capacity *= 2;

		tmp = sgen_alloc_internal_dynamic (
			toggleref_array_capacity * sizeof (MonoGCToggleRef),
			INTERNAL_MEM_TOGGLEREF_DATA,
			TRUE);

		memcpy (tmp, toggleref_array, toggleref_array_size * sizeof (MonoGCToggleRef));

		sgen_free_internal_dynamic (toggleref_array, old_capacity * sizeof (MonoGCToggleRef), INTERNAL_MEM_TOGGLEREF_DATA);
		toggleref_array = tmp;
	}
}
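/*
 * A minimal standalone sketch of the grow-by-doubling pattern above, using
 * plain malloc/free instead of sgen's internal allocator. The names (Buffer,
 * grow_buffer) are hypothetical, for illustration only; like the original,
 * it also grows when size + needed exactly equals the capacity.
 */
#include <stdlib.h>
#include <string.h>

typedef struct {
	int *data;
	int size;     /* elements in use */
	int capacity; /* elements allocated */
} Buffer;

static int
grow_buffer (Buffer *buf, int needed)
{
	int *tmp;
	int new_capacity;

	if (!buf->data) {
		buf->capacity = 32;
		buf->data = malloc (buf->capacity * sizeof (int));
		if (!buf->data)
			return 0;
	}
	if (buf->size + needed < buf->capacity)
		return 1;

	new_capacity = buf->capacity;
	while (new_capacity < buf->size + needed)
		new_capacity *= 2;

	tmp = malloc (new_capacity * sizeof (int));
	if (!tmp)
		return 0;
	memcpy (tmp, buf->data, buf->size * sizeof (int));
	free (buf->data);
	buf->data = tmp;
	buf->capacity = new_capacity;
	return 1;
}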
/*
 * Allocate a new section of memory to be used as old generation.
 */
static GCMemSection*
alloc_major_section (void)
{
	GCMemSection *section;
	int scan_starts;

	section = sgen_alloc_os_memory_aligned (MAJOR_SECTION_SIZE, MAJOR_SECTION_SIZE, TRUE);
	section->next_data = section->data = (char*)section + SGEN_SIZEOF_GC_MEM_SECTION;
	g_assert (!((mword)section->data & 7));
	section->size = MAJOR_SECTION_SIZE - SGEN_SIZEOF_GC_MEM_SECTION;
	section->end_data = section->data + section->size;
	sgen_update_heap_boundaries ((mword)section->data, (mword)section->end_data);
	DEBUG (3, fprintf (gc_debug_file, "New major heap section: (%p-%p), total: %lld\n", section->data, section->end_data, (long long int)mono_gc_get_heap_size ()));

	/* set up the scan-start pointers */
	scan_starts = (section->size + SGEN_SCAN_START_SIZE - 1) / SGEN_SCAN_START_SIZE;
	section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
	section->num_scan_start = scan_starts;
	section->block.role = MEMORY_ROLE_GEN1;
	section->is_to_space = TRUE;

	/* add to the section list */
	section->block.next = section_list;
	section_list = section;

	++num_major_sections;

	return section;
}
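/*
 * The scan_starts computation above rounds up with the classic
 * ceiling-division idiom (n + d - 1) / d. A tiny self-contained check of
 * that arithmetic (div_round_up is a hypothetical helper name):
 */
#include <assert.h>

static int
div_round_up (int n, int d)
{
	return (n + d - 1) / d;
}

static void
check_div_round_up (void)
{
	/* e.g. 10000 bytes at 4096-byte scan-start granularity needs 3 entries */
	assert (div_round_up (10000, 4096) == 3);
	assert (div_round_up (8192, 4096) == 2);
	assert (div_round_up (1, 4096) == 1);
}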
/*
 * Flushing buffers takes an exclusive lock, so it must only be done when the world is
 * stopped, otherwise we might end up with a deadlock because a stopped thread owns the
 * lock.
 *
 * The protocol entries that do flush have `FLUSH()` in their definition.
 */
void
binary_protocol_flush_buffers (gboolean force)
{
#ifdef HAVE_UNISTD_H
	int num_buffers = 0, i;
	BinaryProtocolBuffer *buf;
	BinaryProtocolBuffer **bufs;

	if (binary_protocol_file == -1)
		return;

	if (!force && !try_lock_exclusive ())
		return;

	for (buf = binary_protocol_buffers; buf != NULL; buf = buf->next)
		++num_buffers;
	bufs = (BinaryProtocolBuffer **)sgen_alloc_internal_dynamic (num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
	for (buf = binary_protocol_buffers, i = 0; buf != NULL; buf = buf->next, i++)
		bufs [i] = buf;
	SGEN_ASSERT (0, i == num_buffers, "Binary protocol buffer count error");

	binary_protocol_buffers = NULL;

	for (i = num_buffers - 1; i >= 0; --i) {
		binary_protocol_flush_buffer (bufs [i]);
		binary_protocol_check_file_overflow ();
	}

	/* free the snapshot array, not the (now NULL) list cursor */
	sgen_free_internal_dynamic (bufs, num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL);

	if (!force)
		unlock_exclusive ();
#endif
}
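/*
 * A minimal sketch of the snapshot-and-reverse pattern used above: the buffer
 * list is built by pushing at the head (newest first), so copying it into an
 * array and walking that array backwards processes buffers oldest-first. The
 * names here (Node, flush_list_in_order) are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
	int payload;
	struct Node *next;
} Node;

static void
flush_list_in_order (Node **list_head)
{
	Node *n, **snapshot;
	int count = 0, i;

	for (n = *list_head; n; n = n->next)
		++count;
	snapshot = malloc (count * sizeof (Node*));
	if (!snapshot)
		return;
	for (n = *list_head, i = 0; n; n = n->next, i++)
		snapshot [i] = n;

	*list_head = NULL; /* detach the list before flushing */

	/* newest entries are at the front, so flush back-to-front for FIFO order */
	for (i = count - 1; i >= 0; --i)
		printf ("flushing %d\n", snapshot [i]->payload);

	free (snapshot); /* free the array, not the nodes */
}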
void
sgen_workers_init (int num_workers)
{
	int i;
	void **workers_data_ptrs = alloca (num_workers * sizeof (void *));

	if (!sgen_get_major_collector ()->is_concurrent) {
		sgen_thread_pool_init (num_workers, thread_pool_init_func, NULL, NULL, NULL);
		return;
	}

	//g_print ("initing %d workers\n", num_workers);

	workers_num = num_workers;

	workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
	memset (workers_data, 0, sizeof (WorkerData) * num_workers);

	init_distribute_gray_queue ();

	for (i = 0; i < workers_num; ++i)
		workers_data_ptrs [i] = (void *) &workers_data [i];

	sgen_thread_pool_init (num_workers, thread_pool_init_func, marker_idle_func, continue_idle_func, workers_data_ptrs);

	mono_counters_register ("# workers finished", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_finished);
}
void
sgen_pin_stats_register_address (char *addr, int pin_type)
{
	PinStatAddress **node_ptr = &pin_stat_addresses;
	PinStatAddress *node;
	int pin_type_bit = 1 << pin_type;

	while (*node_ptr) {
		node = *node_ptr;
		if (addr == node->addr) {
			node->pin_types |= pin_type_bit;
			return;
		}
		if (addr < node->addr)
			node_ptr = &node->left;
		else
			node_ptr = &node->right;
	}

	node = sgen_alloc_internal_dynamic (sizeof (PinStatAddress), INTERNAL_MEM_STATISTICS, TRUE);
	node->addr = addr;
	node->pin_types = pin_type_bit;
	node->left = node->right = NULL;

	*node_ptr = node;
}
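/*
 * A self-contained sketch of the iterative pointer-to-pointer BST insert used
 * above, on a simplified node type. Walking via a SearchNode** avoids the
 * usual "remember the parent" bookkeeping: the slot to update is always
 * *node_ptr. The type and function names are hypothetical.
 */
#include <stdlib.h>

typedef struct SearchNode {
	char *addr;
	int flags;
	struct SearchNode *left, *right;
} SearchNode;

static void
tree_insert (SearchNode **root, char *addr, int flag_bit)
{
	SearchNode **node_ptr = root;
	SearchNode *node;

	while (*node_ptr) {
		node = *node_ptr;
		if (addr == node->addr) {
			node->flags |= flag_bit; /* already present: just merge the flag */
			return;
		}
		node_ptr = addr < node->addr ? &node->left : &node->right;
	}

	node = calloc (1, sizeof (SearchNode));
	if (!node)
		return;
	node->addr = addr;
	node->flags = flag_bit;
	*node_ptr = node; /* writes into the parent's left/right slot, or the root */
}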
static void
rehash (SgenHashTable *hash_table)
{
	SgenHashTableEntry **old_hash = hash_table->table;
	guint old_hash_size = hash_table->size;
	guint i, hash, new_size;
	SgenHashTableEntry **new_hash;
	SgenHashTableEntry *entry, *next;

	if (!old_hash) {
		sgen_register_fixed_internal_mem_type (hash_table->entry_mem_type,
				sizeof (SgenHashTableEntry*) + sizeof (gpointer) + hash_table->data_size);
		new_size = 13;
	} else {
		new_size = g_spaced_primes_closest (hash_table->num_entries);
	}

	new_hash = sgen_alloc_internal_dynamic (new_size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type, TRUE);
	for (i = 0; i < old_hash_size; ++i) {
		for (entry = old_hash [i]; entry; entry = next) {
			hash = hash_table->hash_func (entry->key) % new_size;
			next = entry->next;
			entry->next = new_hash [hash];
			new_hash [hash] = entry;
		}
	}
	sgen_free_internal_dynamic (old_hash, old_hash_size * sizeof (SgenHashTableEntry*), hash_table->table_mem_type);
	hash_table->table = new_hash;
	hash_table->size = new_size;
}
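/*
 * The core move in rehash above: every entry is relinked, not reallocated. A
 * minimal sketch with a fixed hash function; Entry and rehash_chains are
 * hypothetical names. Entries are pushed at the head of their new chain, so
 * per-bucket order is reversed, which a hash table doesn't care about.
 */
#include <stdlib.h>

typedef struct Entry {
	unsigned key;
	struct Entry *next;
} Entry;

static Entry **
rehash_chains (Entry **old_table, unsigned old_size, unsigned new_size)
{
	Entry **new_table = calloc (new_size, sizeof (Entry*));
	unsigned i;

	if (!new_table)
		return NULL;

	for (i = 0; i < old_size; ++i) {
		Entry *entry = old_table [i], *next;
		for (; entry; entry = next) {
			unsigned hash = entry->key % new_size;
			next = entry->next;          /* save before relinking */
			entry->next = new_table [hash];
			new_table [hash] = entry;
		}
	}
	free (old_table);
	return new_table;
}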
SgenThreadPoolJob*
sgen_thread_pool_job_alloc (const char *name, SgenThreadPoolJobFunc func, size_t size)
{
	SgenThreadPoolJob *job = sgen_alloc_internal_dynamic (size, INTERNAL_MEM_THREAD_POOL_JOB, TRUE);
	job->name = name;
	job->size = size;
	job->state = STATE_WAITING;
	job->func = func;
	return job;
}
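/*
 * The size parameter exists so callers can embed SgenThreadPoolJob as the
 * header of a larger, job-specific struct. A hypothetical usage sketch
 * (ScanJob, scan_func, section_start and section_end are illustrative names,
 * not part of the code shown here):
 *
 *     typedef struct {
 *         SgenThreadPoolJob job;   // must be the first field
 *         char *start, *end;       // job-specific payload
 *     } ScanJob;
 *
 *     ScanJob *job = (ScanJob*)sgen_thread_pool_job_alloc ("scan", scan_func, sizeof (ScanJob));
 *     job->start = section_start;
 *     job->end = section_end;
 */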
/*
 * FIXME: later choose a size that takes into account the RememberedSet struct
 * and doesn't waste any alloc padding space.
 */
static RememberedSet*
sgen_alloc_remset (int size, gpointer id, gboolean global)
{
	RememberedSet *res = sgen_alloc_internal_dynamic (sizeof (RememberedSet) + (size * sizeof (gpointer)), INTERNAL_MEM_REMSET, TRUE);
	res->store_next = res->data;
	res->end_set = res->data + size;
	res->next = NULL;
	SGEN_LOG (4, "Allocated%s remset size %d at %p for %p", global ? " global" : "", size, res->data, id);
	return res;
}
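/*
 * A standalone sketch of the "struct header plus trailing array" layout that
 * sgen_alloc_remset uses, written with a C99 flexible array member and a bump
 * pointer. All names (Remset, remset_new, remset_record) are hypothetical.
 */
#include <stdlib.h>

typedef struct {
	void **store_next; /* bump pointer into data[] */
	void **end_set;    /* one past the last usable slot */
	void *data [];     /* the slots themselves, allocated inline */
} Remset;

static Remset *
remset_new (int size)
{
	Remset *rs = malloc (sizeof (Remset) + size * sizeof (void*));
	if (!rs)
		return NULL;
	rs->store_next = rs->data;
	rs->end_set = rs->data + size;
	return rs;
}

/* returns 0 when the set is full and a new one must be chained */
static int
remset_record (Remset *rs, void *ptr)
{
	if (rs->store_next == rs->end_set)
		return 0;
	*rs->store_next++ = ptr;
	return 1;
}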
static void
realloc_pin_queue (void)
{
	int new_size = pin_queue_size ? pin_queue_size + pin_queue_size/2 : 1024;
	void **new_pin = sgen_alloc_internal_dynamic (sizeof (void*) * new_size, INTERNAL_MEM_PIN_QUEUE, TRUE);
	memcpy (new_pin, pin_queue, sizeof (void*) * next_pin_slot);
	sgen_free_internal_dynamic (pin_queue, sizeof (void*) * pin_queue_size, INTERNAL_MEM_PIN_QUEUE);
	pin_queue = new_pin;
	pin_queue_size = new_size;
	SGEN_LOG (4, "Reallocated pin queue to size: %d", new_size);
}
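/*
 * Note on the growth factor: realloc_pin_queue grows by 1.5x where
 * ensure_toggleref_capacity and dyn_array_ensure_capacity double. Both keep
 * appends amortized O(1), since the capacities form a geometric series; 1.5x
 * performs a few more reallocations in exchange for a lower peak footprint
 * while old and new buffers coexist during the copy.
 */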
static char*
filename_for_index (int index)
{
	char *filename;

	SGEN_ASSERT (0, file_size_limit > 0, "Indexed binary protocol filename must only be used with file size limit");

	filename = (char *)sgen_alloc_internal_dynamic (strlen (filename_or_prefix) + 32, INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
	sprintf (filename, "%s.%d", filename_or_prefix, index);

	return filename;
}
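/*
 * The 32 bytes of slack above leave room for the '.', the decimal digits of
 * index, and the NUL. A hedged sketch of the same helper written with
 * snprintf, which makes the size bound explicit at the call site
 * (make_indexed_filename is a hypothetical name):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
make_indexed_filename (const char *prefix, int index)
{
	size_t len = strlen (prefix) + 32;
	char *filename = malloc (len);
	if (!filename)
		return NULL;
	snprintf (filename, len, "%s.%d", prefix, index);
	return filename;
}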
void
binary_protocol_init (const char *filename, long long limit)
{
#ifdef HAVE_UNISTD_H
	filename_or_prefix = sgen_alloc_internal_dynamic (strlen (filename) + 1, INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
	strcpy (filename_or_prefix, filename);

	file_size_limit = limit;

	binary_protocol_open_file ();
#endif
}
static void
remset_stats (void)
{
	RememberedSet *remset;
	int size = 0;
	SgenThreadInfo *info;
	mword *addresses, *bumper, *p, *r;

	FOREACH_THREAD (info) {
		for (remset = info->remset; remset; remset = remset->next)
			size += remset->store_next - remset->data;
	} END_FOREACH_THREAD
	for (remset = freed_thread_remsets; remset; remset = remset->next)
		size += remset->store_next - remset->data;
	for (remset = global_remset; remset; remset = remset->next)
		size += remset->store_next - remset->data;

	bumper = addresses = sgen_alloc_internal_dynamic (sizeof (mword) * size, INTERNAL_MEM_STATISTICS, TRUE);

	FOREACH_THREAD (info) {
		for (remset = info->remset; remset; remset = remset->next)
			bumper = collect_store_remsets (remset, bumper);
	} END_FOREACH_THREAD
	for (remset = global_remset; remset; remset = remset->next)
		bumper = collect_store_remsets (remset, bumper);
	for (remset = freed_thread_remsets; remset; remset = remset->next)
		bumper = collect_store_remsets (remset, bumper);

	g_assert (bumper <= addresses + size);

	stat_store_remsets += bumper - addresses;

	sgen_sort_addresses ((void**)addresses, bumper - addresses);
	p = addresses;
	r = addresses + 1;
	while (r < bumper) {
		if (*r != *p)
			*++p = *r;
		++r;
	}

	stat_store_remsets_unique += p - addresses;

	sgen_free_internal_dynamic (addresses, sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
}
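/*
 * A self-contained sketch of the sort-and-compact idiom at the end of
 * remset_stats (unique_compact and check_unique_compact are hypothetical
 * names). Note that the original accumulates `p - addresses`, which for a
 * non-empty array appears to be one less than the number of distinct values;
 * the sketch returns the full count.
 */
#include <assert.h>
#include <stddef.h>

static size_t
unique_compact (unsigned long *v, size_t n)
{
	size_t p = 0, r;

	if (n == 0)
		return 0;
	for (r = 1; r < n; ++r) {
		/* keep v[r] only if it differs from the last value we kept */
		if (v [r] != v [p])
			v [++p] = v [r];
	}
	return p + 1;
}

static void
check_unique_compact (void)
{
	unsigned long v [] = { 1, 1, 2, 5, 5, 5, 9 };
	assert (unique_compact (v, 7) == 4); /* compacts to 1, 2, 5, 9 */
}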
void
sgen_workers_init (int num_workers)
{
	int i;

	if (!sgen_get_major_collector ()->is_parallel)
		return;

	//g_print ("initing %d workers\n", num_workers);

	workers_num = num_workers;

	workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
	memset (workers_data, 0, sizeof (WorkerData) * num_workers);

	MONO_SEM_INIT (&workers_waiting_sem, 0);
	MONO_SEM_INIT (&workers_done_sem, 0);

	sgen_gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue, workers_gray_queue_share_redirect, &workers_gc_thread_data);
	mono_mutex_init (&workers_gc_thread_data.stealable_stack_mutex, NULL);
	workers_gc_thread_data.stealable_stack_fill = 0;

	if (sgen_get_major_collector ()->alloc_worker_data)
		workers_gc_thread_data.major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();

	for (i = 0; i < workers_num; ++i) {
		/* private gray queue is inited by the thread itself */
		mono_mutex_init (&workers_data [i].stealable_stack_mutex, NULL);
		workers_data [i].stealable_stack_fill = 0;

		if (sgen_get_major_collector ()->alloc_worker_data)
			workers_data [i].major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
	}

	LOCK_INIT (workers_job_queue_mutex);

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_JOB_QUEUE_ENTRY, sizeof (JobQueueEntry));

	mono_counters_register ("Stolen from self lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_lock);
	mono_counters_register ("Stolen from self no lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_no_lock);
	mono_counters_register ("Stolen from others", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_others);
	mono_counters_register ("# workers waited", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_num_waited);
}
static void
dyn_array_ensure_capacity (DynArray *da, int capacity)
{
	int old_capacity = da->capacity;
	char *new_data;

	if (capacity <= old_capacity)
		return;

	if (da->capacity == 0)
		da->capacity = 2;
	while (capacity > da->capacity)
		da->capacity *= 2;

	new_data = sgen_alloc_internal_dynamic (da->elem_size * da->capacity, INTERNAL_MEM_BRIDGE_DATA, TRUE);
	memcpy (new_data, da->data, da->elem_size * da->size);
	sgen_free_internal_dynamic (da->data, da->elem_size * old_capacity, INTERNAL_MEM_BRIDGE_DATA);
	da->data = new_data;
}
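/*
 * dyn_array_ensure_capacity multiplies elem_size * capacity without an
 * overflow check, which is fine for the small arrays the bridge uses. A
 * hedged sketch of a checked variant, in case the pattern is ever reused for
 * untrusted sizes (checked_array_size is a hypothetical helper):
 */
#include <stddef.h>
#include <stdint.h>

/* returns 0 on overflow, otherwise stores elem_size * count in *out */
static int
checked_array_size (size_t elem_size, size_t count, size_t *out)
{
	if (elem_size != 0 && count > SIZE_MAX / elem_size)
		return 0;
	*out = elem_size * count;
	return 1;
}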
/*
 * Flushing buffers takes an exclusive lock, so it must only be done when the world is
 * stopped, otherwise we might end up with a deadlock because a stopped thread owns the
 * lock.
 *
 * The protocol entries that do flush have `FLUSH()` in their definition.
 */
gboolean
binary_protocol_flush_buffers (gboolean force)
{
#ifdef HAVE_UNISTD_H
	int num_buffers = 0, i;
	BinaryProtocolBuffer *header;
	BinaryProtocolBuffer *buf;
	BinaryProtocolBuffer **bufs;

	if (binary_protocol_file == -1)
		return FALSE;

	if (!force && !try_lock_exclusive ())
		return FALSE;

	header = binary_protocol_buffers;
	for (buf = header; buf != NULL; buf = buf->next)
		++num_buffers;
	bufs = (BinaryProtocolBuffer **)sgen_alloc_internal_dynamic (num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
	for (buf = header, i = 0; buf != NULL; buf = buf->next, i++)
		bufs [i] = buf;
	SGEN_ASSERT (0, i == num_buffers, "Binary protocol buffer count error");

	/*
	 * This might be incorrect when forcing, but all bets are off in that case, anyway,
	 * because we're trying to figure out a bug in the debugger.
	 */
	binary_protocol_buffers = NULL;

	for (i = num_buffers - 1; i >= 0; --i) {
		binary_protocol_flush_buffer (bufs [i]);
		binary_protocol_check_file_overflow ();
	}

	/* free the snapshot array, not the (now NULL) list cursor */
	sgen_free_internal_dynamic (bufs, num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL);

	if (!force)
		unlock_exclusive ();

	return TRUE;
#else
	return FALSE;
#endif
}
void
sgen_workers_init (int num_workers)
{
	int i;

	if (!sgen_get_major_collector ()->is_concurrent)
		return;

	//g_print ("initing %d workers\n", num_workers);

	workers_num = num_workers;

	workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
	memset (workers_data, 0, sizeof (WorkerData) * num_workers);

	MONO_SEM_INIT (&workers_waiting_sem, 0);
	MONO_SEM_INIT (&workers_done_sem, 0);

	init_distribute_gray_queue (sgen_get_major_collector ()->is_concurrent);

	if (sgen_get_major_collector ()->alloc_worker_data)
		workers_gc_thread_major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();

	for (i = 0; i < workers_num; ++i) {
		workers_data [i].index = i;

		if (sgen_get_major_collector ()->alloc_worker_data)
			workers_data [i].major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
	}

	LOCK_INIT (workers_job_queue_mutex);

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_JOB_QUEUE_ENTRY, sizeof (JobQueueEntry));

	mono_counters_register ("Stolen from self lock", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_stolen_from_self_lock);
	mono_counters_register ("Stolen from self no lock", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_stolen_from_self_no_lock);
	mono_counters_register ("Stolen from others", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_stolen_from_others);
	mono_counters_register ("# workers waited", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_waited);
}
static void
processing_build_callback_data (int generation)
{
	int j, api_index;
	MonoGCBridgeSCC **api_sccs;
	MonoGCBridgeXRef *api_xrefs;
	gint64 curtime;
	ColorBucket *cur;

	g_assert (bridge_processor->num_sccs == 0 && bridge_processor->num_xrefs == 0);
	g_assert (!bridge_processor->api_sccs && !bridge_processor->api_xrefs);

	if (!dyn_array_ptr_size (&registered_bridges))
		return;

	SGEN_TV_GETTIME (curtime);

	/* create API objects */

#if defined (DUMP_GRAPH)
	printf ("***** API *****\n");
	printf ("number of SCCs %d\n", num_colors_with_bridges);
#endif

	/* This is a straightforward translation from colors to the bridge callback format. */
	api_sccs = sgen_alloc_internal_dynamic (sizeof (MonoGCBridgeSCC*) * num_colors_with_bridges, INTERNAL_MEM_BRIDGE_DATA, TRUE);
	api_index = xref_count = 0;

	for (cur = root_color_bucket; cur; cur = cur->next) {
		ColorData *cd;
		for (cd = &cur->data [0]; cd < cur->next_data; ++cd) {
			int bridges = dyn_array_ptr_size (&cd->bridges);
			if (!bridges)
				continue;

			api_sccs [api_index] = sgen_alloc_internal_dynamic (sizeof (MonoGCBridgeSCC) + sizeof (MonoObject*) * bridges, INTERNAL_MEM_BRIDGE_DATA, TRUE);
			api_sccs [api_index]->is_alive = FALSE;
			api_sccs [api_index]->num_objs = bridges;

			cd->api_index = api_index;

			for (j = 0; j < bridges; ++j)
				api_sccs [api_index]->objs [j] = dyn_array_ptr_get (&cd->bridges, j);
			api_index++;
		}
	}

	scc_setup_time = step_timer (&curtime);

	for (cur = root_color_bucket; cur; cur = cur->next) {
		ColorData *cd;
		for (cd = &cur->data [0]; cd < cur->next_data; ++cd) {
			int bridges = dyn_array_ptr_size (&cd->bridges);
			if (!bridges)
				continue;

			dyn_array_ptr_set_size (&color_merge_array, 0);
			gather_xrefs (cd);
			reset_xrefs (cd);
			dyn_array_ptr_set_all (&cd->other_colors, &color_merge_array);
			xref_count += dyn_array_ptr_size (&cd->other_colors);
		}
	}

	gather_xref_time = step_timer (&curtime);

#if defined (DUMP_GRAPH)
	printf ("TOTAL XREFS %d\n", xref_count);
	dump_color_table (" after xref pass", TRUE);
#endif

	api_xrefs = sgen_alloc_internal_dynamic (sizeof (MonoGCBridgeXRef) * xref_count, INTERNAL_MEM_BRIDGE_DATA, TRUE);
	api_index = 0;
	for (cur = root_color_bucket; cur; cur = cur->next) {
		ColorData *src;
		for (src = &cur->data [0]; src < cur->next_data; ++src) {
			int bridges = dyn_array_ptr_size (&src->bridges);
			if (!bridges)
				continue;

			for (j = 0; j < dyn_array_ptr_size (&src->other_colors); ++j) {
				ColorData *dest = dyn_array_ptr_get (&src->other_colors, j);
				g_assert (dyn_array_ptr_size (&dest->bridges)); /* We flattened the color graph, so this must never happen. */

				api_xrefs [api_index].src_scc_index = src->api_index;
				api_xrefs [api_index].dst_scc_index = dest->api_index;
				++api_index;
			}
		}
	}

	g_assert (xref_count == api_index);
	xref_setup_time = step_timer (&curtime);

#if defined (DUMP_GRAPH)
	{
		int i;
		printf ("---xrefs:\n");
		for (i = 0; i < xref_count; ++i)
			printf ("\t%d -> %d\n", api_xrefs [i].src_scc_index, api_xrefs [i].dst_scc_index);
	}
#endif

	//FIXME move half of the cleanup to before the bridge callback?
	bridge_processor->num_sccs = num_colors_with_bridges;
	bridge_processor->api_sccs = api_sccs;
	bridge_processor->num_xrefs = xref_count;
	bridge_processor->api_xrefs = api_xrefs;
}
static gboolean
sgen_compare_bridge_processor_results (SgenBridgeProcessor *a, SgenBridgeProcessor *b)
{
	int i;
	SgenHashTable obj_to_a_scc = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_BRIDGE_DEBUG, INTERNAL_MEM_BRIDGE_DEBUG, sizeof (int), mono_aligned_addr_hash, NULL);
	SgenHashTable b_scc_to_a_scc = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_BRIDGE_DEBUG, INTERNAL_MEM_BRIDGE_DEBUG, sizeof (int), g_direct_hash, NULL);
	MonoGCBridgeXRef *a_xrefs, *b_xrefs;
	size_t xrefs_alloc_size;

	// dump_processor_state (a);
	// dump_processor_state (b);

	if (a->num_sccs != b->num_sccs)
		g_error ("SCC count expected %d but got %d", a->num_sccs, b->num_sccs);
	if (a->num_xrefs != b->num_xrefs)
		g_error ("XRef count expected %d but got %d", a->num_xrefs, b->num_xrefs);

	/*
	 * First we build a hash of each object in `a` to its respective SCC index within
	 * `a`.  Along the way we also assert that no object is in more than one SCC.
	 */
	for (i = 0; i < a->num_sccs; ++i) {
		int j;
		MonoGCBridgeSCC *scc = a->api_sccs [i];

		g_assert (scc->num_objs > 0);

		for (j = 0; j < scc->num_objs; ++j) {
			GCObject *obj = scc->objs [j];
			gboolean new_entry = sgen_hash_table_replace (&obj_to_a_scc, obj, &i, NULL);
			g_assert (new_entry);
		}
	}

	/*
	 * Now we check whether each of the objects in `b` are in `a`, and whether the SCCs
	 * of `b` contain the same sets of objects as those of `a`.
	 *
	 * While we're doing this, build a hash table to map from `b` SCC indexes to `a` SCC
	 * indexes.
	 */
	for (i = 0; i < b->num_sccs; ++i) {
		MonoGCBridgeSCC *scc = b->api_sccs [i];
		MonoGCBridgeSCC *a_scc;
		int *a_scc_index_ptr;
		int a_scc_index;
		int j;
		gboolean new_entry;

		g_assert (scc->num_objs > 0);

		a_scc_index_ptr = (int *)sgen_hash_table_lookup (&obj_to_a_scc, scc->objs [0]);
		g_assert (a_scc_index_ptr);
		a_scc_index = *a_scc_index_ptr;

		//g_print ("A SCC %d -> B SCC %d\n", a_scc_index, i);

		a_scc = a->api_sccs [a_scc_index];
		g_assert (a_scc->num_objs == scc->num_objs);

		for (j = 1; j < scc->num_objs; ++j) {
			a_scc_index_ptr = (int *)sgen_hash_table_lookup (&obj_to_a_scc, scc->objs [j]);
			g_assert (a_scc_index_ptr);
			g_assert (*a_scc_index_ptr == a_scc_index);
		}

		new_entry = sgen_hash_table_replace (&b_scc_to_a_scc, GINT_TO_POINTER (i), &a_scc_index, NULL);
		g_assert (new_entry);
	}

	/*
	 * Finally, check that we have the same xrefs.  We do this by making copies of both
	 * xref arrays, and replacing the SCC indexes in the copy for `b` with the
	 * corresponding indexes in `a`.  Then we sort both arrays and assert that they're
	 * the same.
	 *
	 * At the same time, check that no xref is self-referential and that there are no
	 * duplicate ones.
	 */
	xrefs_alloc_size = a->num_xrefs * sizeof (MonoGCBridgeXRef);
	a_xrefs = (MonoGCBridgeXRef *)sgen_alloc_internal_dynamic (xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG, TRUE);
	b_xrefs = (MonoGCBridgeXRef *)sgen_alloc_internal_dynamic (xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG, TRUE);

	memcpy (a_xrefs, a->api_xrefs, xrefs_alloc_size);
	for (i = 0; i < b->num_xrefs; ++i) {
		MonoGCBridgeXRef *xref = &b->api_xrefs [i];
		int *scc_index_ptr;

		g_assert (xref->src_scc_index != xref->dst_scc_index);

		scc_index_ptr = (int *)sgen_hash_table_lookup (&b_scc_to_a_scc, GINT_TO_POINTER (xref->src_scc_index));
		g_assert (scc_index_ptr);
		b_xrefs [i].src_scc_index = *scc_index_ptr;

		scc_index_ptr = (int *)sgen_hash_table_lookup (&b_scc_to_a_scc, GINT_TO_POINTER (xref->dst_scc_index));
		g_assert (scc_index_ptr);
		b_xrefs [i].dst_scc_index = *scc_index_ptr;
	}

	qsort (a_xrefs, a->num_xrefs, sizeof (MonoGCBridgeXRef), compare_xrefs);
	qsort (b_xrefs, a->num_xrefs, sizeof (MonoGCBridgeXRef), compare_xrefs);

	for (i = 0; i < a->num_xrefs; ++i) {
		g_assert (a_xrefs [i].src_scc_index == b_xrefs [i].src_scc_index);
		g_assert (a_xrefs [i].dst_scc_index == b_xrefs [i].dst_scc_index);
	}

	sgen_hash_table_clean (&obj_to_a_scc);
	sgen_hash_table_clean (&b_scc_to_a_scc);
	sgen_free_internal_dynamic (a_xrefs, xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG);
	sgen_free_internal_dynamic (b_xrefs, xrefs_alloc_size, INTERNAL_MEM_BRIDGE_DEBUG);

	return TRUE;
}
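/*
 * compare_xrefs is referenced above but not shown here. For qsort to give
 * both arrays the same total order, it presumably compares src_scc_index
 * first and dst_scc_index second; a plausible sketch, not the verbatim
 * original:
 */
static int
compare_xrefs (const void *a_ptr, const void *b_ptr)
{
	const MonoGCBridgeXRef *a = (const MonoGCBridgeXRef *)a_ptr;
	const MonoGCBridgeXRef *b = (const MonoGCBridgeXRef *)b_ptr;

	/* SCC indexes are small and non-negative, so subtraction cannot overflow */
	if (a->src_scc_index != b->src_scc_index)
		return a->src_scc_index - b->src_scc_index;
	return a->dst_scc_index - b->dst_scc_index;
}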