Example #1
// Grow the refs list. Rehashes the entries.
static void grow_refs(weak_referrer_array_t *list)
{
    unsigned old_num_allocated = list->num_allocated;
    unsigned num_refs = list->num_refs;
    weak_referrer_t *old_refs = list->refs;
    unsigned new_allocated;
    if (old_num_allocated == 0) {
        new_allocated = 1;
    } else if (old_num_allocated == 1) {
        new_allocated = 2;
    } else {
        new_allocated = old_num_allocated + old_num_allocated - 1;
    }
    list->refs = aux_calloc(new_allocated, sizeof(weak_referrer_t));
    list->num_allocated = new_allocated;
    list->num_refs = 0;
    list->max_hash_displacement = 0;
    
    unsigned i;
    for (i=0; i < old_num_allocated && num_refs > 0; i++) {
        if (old_refs[i].referrer != NULL) {
            append_referrer_no_lock(list, old_refs[i].referrer, old_refs[i].block);
            num_refs--;
        }
    }
    if (old_refs)
        aux_free(old_refs);
}
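grow_refs allocates the larger array and then re-runs the normal insertion path, append_referrer_no_lock, for every surviving referrer. That helper is not part of this listing; the sketch below shows the kind of linear-probe insert it is assumed to be, with hypothetical names (referrer_slot, hash_pointer, probe_insert) standing in for the libauto types.

// A minimal sketch, not libauto's code: a linear-probe insert of the kind
// append_referrer_no_lock is assumed to perform when grow_refs rehashes the
// surviving entries.  referrer_slot, hash_pointer, and probe_insert are
// hypothetical names.
#include <cstddef>
#include <cstdint>
#include <vector>

struct referrer_slot {
    void **referrer = nullptr;   // nullptr marks an empty slot
    void  *block    = nullptr;
};

static size_t hash_pointer(void *p, size_t table_size) {
    // crude pointer hash; the real table has its own hash function
    return (reinterpret_cast<uintptr_t>(p) >> 4) % table_size;
}

// Linear-probe insert that tracks the longest displacement, mirroring the
// max_hash_displacement counter that grow_refs resets before rehashing.
static void probe_insert(std::vector<referrer_slot> &table,
                         size_t &max_displacement,
                         void **referrer, void *block) {
    size_t index = hash_pointer(referrer, table.size());
    size_t displacement = 0;
    while (table[index].referrer != nullptr) {
        index = (index + 1) % table.size();
        ++displacement;
    }
    table[index].referrer = referrer;
    table[index].block = block;
    if (displacement > max_displacement) max_displacement = displacement;
}

Under that assumption, growth is just "allocate a larger empty array, reset the counters, and reinsert each old entry", which is exactly the structure of grow_refs above.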
Example #2
 static void finalize_work(Zone *zone, const size_t garbage_count, void *garbage[]) {
     size_t blocks_freed = 0, bytes_freed = 0;
     zone->invalidate_garbage(garbage_count, garbage);
     zone->free_garbage(garbage_count, garbage, 0, NULL, blocks_freed, bytes_freed);  // TODO:  all blocks are in the small admin, create a batched version.
     zone->clear_zombies();
     aux_free(garbage);
 }
Example #3
// Remove entry from the zone's table of weak references, and rehash
// Does not update num_weak_refs.
static void weak_entry_remove_no_lock(azone_t *azone, weak_entry_t *entry)
{
    // remove entry
    entry->referent = NULL;
    if (entry->referrers.refs) aux_free(entry->referrers.refs);
    entry->referrers.refs = NULL;
    entry->referrers.num_refs = 0;
    entry->referrers.num_allocated = 0;

    // rehash after entry
    weak_entry_t *table = azone->weak_refs_table;
    unsigned table_size = azone->max_weak_refs;
    unsigned hash_index = entry - table;
    unsigned index = hash_index;

    if (!table) return;

    do {
        index++; if (index == table_size) index = 0;
        if (!table[index].referent) return;
        // copy the entry, clear its slot, then reinsert it where it now belongs
        weak_entry_t rehashed = table[index];
        table[index].referent = NULL;
        weak_entry_insert_no_lock(azone, &rehashed);
    } while (index != hash_index);
}
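weak_entry_remove_no_lock clears the slot and then reinserts every entry in the probe cluster that follows it, so no later lookup stops early at the new hole. A minimal, self-contained sketch of that delete-and-reinsert technique for a linear-probed table (slot, table_insert, and table_remove are hypothetical names, not libauto's):

// Minimal sketch of removal from a linear-probed open-addressing table:
// clear the slot, then reinsert the cluster that follows it.
#include <cstddef>
#include <cstdint>

struct slot { void *key = nullptr; void *value = nullptr; };

static bool slot_is_empty(const slot &s) { return s.key == nullptr; }

static size_t hash_key(void *key, size_t table_size) {
    return (reinterpret_cast<uintptr_t>(key) >> 4) % table_size;
}

// Linear-probe insert; assumes the table always keeps at least one empty slot.
static void table_insert(slot *table, size_t table_size, slot s) {
    size_t index = hash_key(s.key, table_size);
    while (!slot_is_empty(table[index])) index = (index + 1) % table_size;
    table[index] = s;
}

// Remove a slot, then reinsert the probe cluster after it so no lookup chain
// is broken -- the same pattern as the do/while loop above.
static void table_remove(slot *table, size_t table_size, size_t removed_index) {
    table[removed_index] = slot{};
    size_t index = removed_index;
    for (;;) {
        index = (index + 1) % table_size;
        if (index == removed_index || slot_is_empty(table[index])) break;
        slot displaced = table[index];   // copy before clearing the slot
        table[index] = slot{};
        table_insert(table, table_size, displaced);
    }
}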
Example #4
    static void finalize_work(void *args) {
        garbage_list *list = (garbage_list *)args;
        size_t blocks_freed = 0, bytes_freed = 0;
        Zone *zone = list->zone;
        zone->invalidate_garbage(list->count, list->garbage);
        zone->free_garbage(false, list->count, list->garbage, blocks_freed, bytes_freed);  // TODO:  all blocks are in the small admin, create a batched version.
        zone->clear_zombies();
        aux_free(list);

#if DEBUG
        __sync_add_and_fetch(&blocks_scavenged_globally, blocks_freed);
        __sync_add_and_fetch(&bytes_scavenged_globally, bytes_freed);
#endif
    }
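Both finalize_work variants take ownership of heap-allocated garbage data and free it once the garbage has been processed, which is what makes the asynchronous hand-off to a dispatch queue (as Example #7 does below) safe. A minimal sketch of that pattern with libdispatch, using hypothetical names (WorkList, finalize_worker, submit_garbage) rather than the zone API:

// Minimal sketch: hand a heap-allocated work descriptor to a dispatch queue;
// the worker owns and deletes it.  Not the libauto API.
#include <dispatch/dispatch.h>
#include <cstdlib>
#include <utility>
#include <vector>

struct WorkList {
    std::vector<void *> blocks;   // garbage blocks to finalize and free
};

static void finalize_worker(void *args) {
    WorkList *list = static_cast<WorkList *>(args);
    for (void *block : list->blocks)
        std::free(block);          // stand-in for invalidate_garbage/free_garbage
    delete list;                   // the worker owns the descriptor
}

static void submit_garbage(std::vector<void *> blocks) {
    WorkList *list = new WorkList{std::move(blocks)};
    dispatch_async_f(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
                     list, finalize_worker);
}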
Example #5
// Grow the given zone's table of weak references if it is full.
static void weak_grow_maybe_no_lock(azone_t *azone)
{
    if (azone->num_weak_refs >= azone->max_weak_refs * 3 / 4) {
        // grow table
        unsigned old_max = azone->max_weak_refs;
        unsigned new_max = old_max ? old_max * 2 + 1 : 15;
        weak_entry_t *old_entries = azone->weak_refs_table;
        weak_entry_t *new_entries = aux_calloc(new_max, sizeof(weak_entry_t));
        azone->max_weak_refs = new_max;
        azone->weak_refs_table = new_entries;

        if (old_entries) {
            weak_entry_t *entry;
            weak_entry_t *end = old_entries + old_max;
            for (entry = old_entries; entry < end; entry++) {
                weak_entry_insert_no_lock(azone, entry);
            }
            aux_free(old_entries);
        }
    }
}
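The growth policy above expands the table once it is three-quarters full, with capacities of the form 2^k - 1 (15, 31, 63, ...). Because the arithmetic is integer, a 15-slot table grows as soon as it holds 11 entries (15 * 3 / 4 truncates to 11). A tiny sketch checking that arithmetic, with hypothetical helper names (needs_growth, next_capacity):

#include <cassert>

// Hypothetical helpers mirroring the load-factor test and size sequence above.
static bool needs_growth(unsigned used, unsigned capacity) {
    return used >= capacity * 3 / 4;     // same integer arithmetic as the zone code
}

static unsigned next_capacity(unsigned capacity) {
    return capacity ? capacity * 2 + 1 : 15;
}

int main() {
    assert(!needs_growth(10, 15));       // 15 * 3 / 4 == 11, so 10 entries is fine
    assert(needs_growth(11, 15));        // ...but the 11th entry triggers growth
    assert(next_capacity(15) == 31);
    assert(next_capacity(31) == 63);
    return 0;
}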
Example #6
 bool Monitor::is_object(void *ptr, long size) {
     if (_class_list) {
         int count = _class_list(NULL, 0);
         if (count > _class_count) {
             void **buffer = (void**) aux_malloc(count * sizeof(void*));
             int new_count = _class_list(buffer, count);
             while (new_count > count) {
                 count = new_count;
                 buffer = (void**) aux_realloc(buffer, count * sizeof(void*));
                 new_count = _class_list(buffer, count);
             }
             _class_count = count;
             for (int i = 0; i < count; i++) ptr_set_add(_class_set, buffer[i]);
             aux_free(buffer);
         }
         // XXX_PCB shouldn't be hard coding this!
         objc_class_header *isa = *(objc_class_header**)ptr;
         return isa && ptr_set_is_member(_class_set, isa) && (size >= isa->instance_size);
     }
     return false;
 }
Example #7
    //
    // process_local_garbage
    //
    void ThreadLocalCollector::process_local_garbage(bool finalizeNow) {
        // Gather the garbage blocks into a contiguous data structure that can be passed to Zone::invalidate_garbage / free_garbage.
        // TODO:  revisit this when we change the collector to use bitmaps to represent garbage lists.
        usword_t garbage_count = _localBlocks.count() - _markedBlocksCounter;
        if (garbage_count == 0) {
            // TODO:  if we keep hitting this condition, we could use feedback to increase the thread local threshold.
            _localBlocks.clearFlags();    // clears flags only.
            GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)_zone, 0ull, 0ull, _localBlocks.count(), (uint64_t)(-1));
            return;
        }

        garbage_list *list = (garbage_list *)aux_malloc(sizeof(garbage_list) + garbage_count * sizeof(vm_address_t));
        list->count = 0;
        list->zone = _zone;
        size_t list_count = 0;
        size_t remaining_size = 0, scavenged_size = 0;
        
        for (uint32_t i = _localBlocks.firstOccupiedSlot(), last = _localBlocks.lastOccupiedSlot(); i <= last; i++) {
            void *block = _localBlocks.unmarkedPointerAtIndex(i);
            if (block) {
                Subzone *subzone = Subzone::subzone(block);
                if (subzone->is_thread_local(block)) {
                    if (GARBAGE_COLLECTION_COLLECTION_END_ENABLED()) {
                        scavenged_size += subzone->size(block);
                    }
                    list->garbage[list_count++] = reinterpret_cast<vm_address_t>(block);
                    _localBlocks.remove(i);
                    subzone->mark_local_garbage(block);
                } else {
                    auto_error(_zone, "not thread local garbage", (const void *)block);
                }
            } else if (GARBAGE_COLLECTION_COLLECTION_END_ENABLED()) {
                block = _localBlocks.markedPointerAtIndex(i);
                if (block) {
                    Subzone *subzone = Subzone::subzone(block);
                    if (subzone->is_thread_local(block)) {
                        remaining_size += subzone->size(block);
                    }
                }
            }
        }
        list->count = list_count;
        
        // clear the marks & compact. must be done before evict_local_garbage(), which does more marking.
        _localBlocks.clearFlagsCompact();

        // if using GCD to finalize and free the garbage, we now compute the set of blocks reachable from the garbage, and cause those to be
        // evicted from the local set.
        if (!finalizeNow) evict_local_garbage(list_count, list->garbage);

        AUTO_PROBE(auto_probe_end_local_scan(list_count, list->garbage));
        
        if (finalizeNow) {
            _zone->invalidate_garbage(list_count, list->garbage);
            scavenge_local(list->count, list->garbage);
            aux_free(list);
        } else {
#if USE_DISPATCH_QUEUE
            dispatch_async(_zone->collection_queue, ^{ finalize_work(list); });
#else
            // should never happen in pthread case.
            __builtin_trap();
#endif
        }
    }