/*
 * Queue a job for the GC worker threads.
 *
 * If the current collection does not use worker threads, the job is run
 * synchronously on the calling thread instead.  Otherwise a queue entry is
 * allocated, pushed onto the shared job list under the queue mutex, and the
 * workers are woken up with the new queue depth.
 */
void
sgen_workers_enqueue_job (JobFunc func, void *data)
{
	JobQueueEntry *job;
	int queued;

	/* No workers for this collection: execute inline. */
	if (!collection_needs_workers ()) {
		func (NULL, data);
		return;
	}

	g_assert (workers_state.data.gc_in_progress);

	job = sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
	job->func = func;
	job->data = data;

	mono_mutex_lock (&workers_job_queue_mutex);
	job->next = workers_job_queue;
	workers_job_queue = job;
	queued = ++workers_job_queue_num_entries;
	++workers_num_jobs_enqueued;
	mono_mutex_unlock (&workers_job_queue_mutex);

	workers_wake_up (queued);
}
/* Allocate an empty color bucket with its write cursor at the first slot. */
static ColorBucket*
new_color_bucket (void)
{
	ColorBucket *bucket = sgen_alloc_internal (INTERNAL_MEM_TARJAN_OBJ_BUCKET);
	bucket->next_data = bucket->data;
	return bucket;
}
/*
 * Store `data` under `key`, overwriting any previous value.
 *
 * Returns FALSE when an existing entry was overwritten in place, TRUE when
 * a new entry was created and linked into the table.
 */
gboolean
sgen_hash_table_replace (SgenHashTable *hash_table, gpointer key, gpointer data)
{
	SgenHashTableEntry *slot;
	guint hash;

	rehash_if_necessary (hash_table);
	slot = lookup (hash_table, key, &hash);

	if (slot) {
		/* Key already present: just copy the new payload over the old. */
		memcpy (slot->data, data, hash_table->data_size);
		return FALSE;
	}

	/* Not found: allocate a fresh entry and prepend it to its bucket. */
	slot = sgen_alloc_internal (hash_table->entry_mem_type);
	slot->key = key;
	memcpy (slot->data, data, hash_table->data_size);
	slot->next = hash_table->table [hash];
	hash_table->table [hash] = slot;
	hash_table->num_entries++;
	return TRUE;
}
/*
 * Queue a named job for the GC worker threads.
 *
 * Runs the job inline when the collection does not use workers.  Otherwise
 * the job is pushed onto the shared list under the queue mutex and, outside
 * of nursery collections, enough workers are signalled to cover the number
 * of pending jobs (capped at the worker count).
 */
void
sgen_workers_enqueue_job (const char *name, JobFunc func, void *data)
{
	JobQueueEntry *job;
	int queued;

	/* Single-threaded collection: execute the job right here. */
	if (!collection_needs_workers ()) {
		func (NULL, data);
		return;
	}

	job = sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
	job->name = name;
	job->func = func;
	job->data = data;

	mono_mutex_lock (&workers_job_queue_mutex);
	job->next = workers_job_queue;
	workers_job_queue = job;
	queued = ++workers_job_queue_num_entries;
	++workers_num_jobs_enqueued;
	mono_mutex_unlock (&workers_job_queue_mutex);

	/* During nursery collections the workers are signalled elsewhere. */
	if (workers_state.data.state != STATE_NURSERY_COLLECTION) {
		int to_signal = queued;
		if (to_signal > workers_num)
			to_signal = workers_num;
		workers_signal_enqueue_work_if_necessary (to_signal);
	}
}
/* Allocate an empty object bucket with its write cursor at the first slot. */
static ObjectBucket*
new_object_bucket (void)
{
	ObjectBucket *bucket = sgen_alloc_internal (INTERNAL_MEM_TARJAN_OBJ_BUCKET);
	bucket->next_data = bucket->data;
	return bucket;
}
/*
 * Make a fresh section the head of the gray queue.
 *
 * Recycles a section from the queue's free list when possible, otherwise
 * allocates a new one.  The section is marked full-sized, transitioned to
 * the enqueued state, linked at the head of the section list, and the
 * queue cursor is reset to just before its first entry.
 */
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *sec;

	HEAVY_STAT (stat_gray_queue_section_alloc ++);

	if (queue->alloc_prepare_func)
		queue->alloc_prepare_func (queue);

	if (queue->free_list) {
		/* Recycle a previously allocated section. */
		sec = queue->free_list;
		queue->free_list = sec->next;
		STATE_TRANSITION (sec, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		/* Free list exhausted: allocate a brand new section. */
		sec = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (sec, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	sec->size = SGEN_GRAY_QUEUE_SECTION_SIZE;

	STATE_TRANSITION (sec, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Push onto the head of the section list and reset the cursor. */
	sec->next = queue->first;
	queue->first = sec;
	queue->cursor = sec->entries - 1;
}
/*
 * Copy a thread's store-remset buffer into a newly allocated generic
 * remembered set and prepend it to the global list.  The first buffer slot
 * is skipped; only the remaining STORE_REMSET_BUFFER_SIZE - 1 pointers are
 * copied.
 */
static void
add_generic_store_remset_from_buffer (gpointer *buffer)
{
	GenericStoreRememberedSet *rs = sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);

	memcpy (rs->data, &buffer [1], (STORE_REMSET_BUFFER_SIZE - 1) * sizeof (gpointer));
	rs->next = generic_store_remsets;
	generic_store_remsets = rs;
}
/*
 * Obtain a fragment, preferring the global free list over a fresh
 * allocation.  Both link fields of the returned fragment are cleared.
 *
 * MUST be called with the world stopped.
 */
SgenFragment*
sgen_fragment_allocator_alloc (void)
{
	SgenFragment *frag;

	if (fragment_freelist) {
		/* Pop the head of the free list. */
		frag = fragment_freelist;
		fragment_freelist = frag->next_in_order;
	} else {
		frag = (SgenFragment *)sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
	}

	frag->next = frag->next_in_order = NULL;
	return frag;
}
/*
 * Per-thread SSB (sequential store buffer) setup, run when a thread is
 * registered with the GC: allocates the thread's remembered set, publishes
 * it through TLS (and the __thread variable when available), and installs
 * a fresh, empty store-remset buffer.
 */
static void
sgen_ssb_register_thread (SgenThreadInfo *info)
{
#ifndef HAVE_KW_THREAD
	/*
	 * NOTE(review): presumably the STORE_REMSET_BUFFER* macros below
	 * expand through this exact variable name when __thread variables
	 * are unavailable — confirm before renaming or removing it.
	 */
	SgenThreadInfo *__thread_info__ = info;
#endif
	info->remset = sgen_alloc_remset (DEFAULT_REMSET_SIZE, info, FALSE);
	mono_native_tls_set_value (remembered_set_key, info->remset);
#ifdef HAVE_KW_THREAD
	remembered_set = info->remset;
#endif
	STORE_REMSET_BUFFER = sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
	/* Start with an empty buffer. */
	STORE_REMSET_BUFFER_INDEX = 0;
}
/*
 * Make a fresh, empty section the head of the gray queue.
 *
 * Recycles a section from the queue's free list when possible, otherwise
 * allocates a new one.  The section is linked at the head of the doubly
 * linked section list and the cursor is reset to just before its first
 * entry.  In parallel mode the section count is bumped atomically after a
 * write barrier so stealer threads see a fully linked section.
 */
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueSection *sec;

	if (queue->free_list) {
		/* Recycle a previously allocated section. */
		sec = queue->free_list;
		queue->free_list = sec->next;
		STATE_TRANSITION (sec, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		HEAVY_STAT (stat_gray_queue_section_alloc ++);

		/* Free list exhausted: allocate a brand new section. */
		sec = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (sec, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	/* The new head starts out empty. */
	sec->size = 0;

	STATE_TRANSITION (sec, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link at the head of the doubly linked section list. */
	sec->prev = NULL;
	sec->next = queue->first;
	if (queue->first)
		queue->first->prev = sec;
	else
		queue->last = sec;
	queue->first = sec;
	queue->cursor = sec->entries - 1;

	if (!is_parallel) {
		queue->num_sections++;
	} else {
		/* Publish the linked section before the count becomes visible. */
		mono_memory_write_barrier ();
		/*
		 * FIXME
		 * we could probably optimize the code to only rely on the write barrier
		 * for synchronization with the stealer thread. Additionally we could also
		 * do a write barrier once every other gray queue change, and request
		 * to have a minimum of sections before stealing, to keep consistency.
		 */
		mono_atomic_inc_i32 (&queue->num_sections);
	}
}
/*
 * Queue a job for the GC worker threads.
 *
 * Runs the job inline when the collection is not parallel.  Otherwise a
 * queue entry is pushed onto the shared job list under the queue mutex and
 * the workers are woken up with the new queue depth.
 */
void
sgen_workers_enqueue_job (JobFunc func, void *data)
{
	JobQueueEntry *job;
	int queued;

	/* Non-parallel collection: just run the job on this thread. */
	if (!sgen_collection_is_parallel ()) {
		func (NULL, data);
		return;
	}

	job = sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
	job->func = func;
	job->data = data;

	mono_mutex_lock (&workers_job_queue_mutex);
	job->next = workers_job_queue;
	workers_job_queue = job;
	queued = ++workers_job_queue_num_entries;
	mono_mutex_unlock (&workers_job_queue_mutex);

	workers_wake_up (queued);
}
/*
 * Make a fresh, empty section the head of the gray queue, recycling from
 * the free list when possible and allocating a new section otherwise.
 */
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *sec;

	if (queue->alloc_prepare_func)
		queue->alloc_prepare_func (queue);

	if (queue->free_list) {
		/* Recycle a previously allocated section. */
		sec = queue->free_list;
		queue->free_list = sec->next;
	} else {
		/* Free list exhausted: allocate a new section. */
		sec = sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
	}

	/* Empty section, pushed at the head of the section list. */
	sec->end = 0;
	sec->next = queue->first;
	queue->first = sec;
}