 //
 // evict_local_garbage
 //
 // Assumes _tlcBuffer/_tlcBufferCount hold the garbage list on entry.
 //
 void ThreadLocalCollector::evict_local_garbage() {
     // Scan the garbage blocks. Anything they reach that is not itself garbage
     // is appended to _tlcBuffer past the current count and evicted below.
     size_t evict_cursor = _tlcBufferCount;
     
     size_t scan_cursor = 0;
     while (scan_cursor < _tlcBufferCount) {
         void *block = _tlcBuffer[scan_cursor++];
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         if (subzone->is_scanned(q)) {
             scan_local_block(subzone, q, block);
         }
     }
     
     usword_t global_size = 0;
     while (evict_cursor < _tlcBufferCount) {
         void *block = _tlcBuffer[evict_cursor++];
         // evict this block, since it is reachable from garbage, but not itself garbage.
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         subzone->make_global(q);
         _localBlocks.remove(block);
         global_size += subzone->size(q);
     }
     if (global_size != 0)
         _zone->adjust_allocation_counter(global_size);
 }
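
// A minimal, self-contained sketch of the two-cursor pattern used above, with a
// plain std::vector standing in for _tlcBuffer/_tlcBufferCount. The Node type
// and its fields are hypothetical illustrations, not libauto types: the point is
// that scanning can append newly reached blocks past the original garbage count,
// and everything appended after that point is "reachable from garbage but not
// itself garbage", so it gets evicted (made global).
#include <cstdio>
#include <vector>

struct Node {
    std::vector<Node*> children;   // blocks this block references
    bool in_buffer = false;        // already appended to the worklist?
    bool global = false;           // set when the block is "evicted"
};

static void scan_and_evict(std::vector<Node*> &buffer) {
    size_t evict_cursor = buffer.size();            // end of the garbage list
    for (size_t scan = 0; scan < buffer.size(); ++scan) {
        for (Node *child : buffer[scan]->children) {
            if (!child->in_buffer) {                // reached from garbage
                child->in_buffer = true;
                buffer.push_back(child);            // buffer grows during the scan
            }
        }
    }
    for (size_t i = evict_cursor; i < buffer.size(); ++i)
        buffer[i]->global = true;                   // reachable but not garbage
}

int main() {
    Node survivor, garbage;
    garbage.children.push_back(&survivor);          // garbage references a live block
    std::vector<Node*> buffer = { &garbage };
    garbage.in_buffer = true;
    scan_and_evict(buffer);
    std::printf("survivor evicted: %d\n", survivor.global);   // prints 1
}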
    //
    // scavenge_local
    //
    // we can't return these blocks to the general pool because the general collector
    // thread may still be scanning us.  Instead we return them to our thread-local cache.
    //
    void ThreadLocalCollector::scavenge_local(size_t count, vm_address_t garbage[]) {
        size_t blocks_freed = 0;
        size_t bytes_freed = 0;
        GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE);
        for (size_t index = 0; index < count; index++) {
            void *ptr = reinterpret_cast<void*>(garbage[index]);
            // Only small quantum blocks are currently allocated locally, take advantage of that.
            Subzone *subzone = Subzone::subzone(ptr);
            usword_t q = subzone->quantum_index_unchecked(ptr, allocate_quantum_small_log2);
            if (!subzone->has_refcount(q)) {
                blocks_freed++;
                bytes_freed += subzone->size(q);
                if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(_zone), uintptr_t(ptr), 0, 0, 0);
                _thread.thread_cache_add(ptr);
            } else {
                _zone->handle_overretained_garbage(ptr, _zone->block_refcount(ptr));
                // make_global ???
            }
        }
        GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)blocks_freed, (uint64_t)bytes_freed);

        __sync_add_and_fetch(&_zone->stats.thread_collections_total, 1);
        __sync_add_and_fetch(&_zone->stats.thread_blocks_recovered_total, blocks_freed);
        __sync_add_and_fetch(&_zone->stats.thread_bytes_recovered_total, bytes_freed);

#if DEBUG
        __sync_add_and_fetch(&blocks_scavenged_locally, blocks_freed);
        __sync_add_and_fetch(&bytes_scavenged_locally, bytes_freed);
#endif
    }
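
// Aside (illustration only): the stats updates above use __sync_add_and_fetch,
// the GCC/Clang atomic builtin, so several collecting threads can bump shared
// totals without taking a lock. A minimal stand-alone example; the counter name
// below is made up, not one of the libauto statistics fields.
#include <cstdio>
#include <pthread.h>

static long blocks_recovered_total = 0;                  // shared across threads

static void *worker(void *) {
    for (int i = 0; i < 100000; ++i)
        __sync_add_and_fetch(&blocks_recovered_total, 1);   // atomic increment
    return nullptr;
}

int main() {
    pthread_t t1, t2;
    pthread_create(&t1, nullptr, worker, nullptr);
    pthread_create(&t2, nullptr, worker, nullptr);
    pthread_join(t1, nullptr);
    pthread_join(t2, nullptr);
    std::printf("%ld\n", blocks_recovered_total);        // always 200000
}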
 inline void ThreadLocalCollector::mark_local_garbage(void **garbage_list, size_t garbage_count) {
     for (size_t i = 0; i < garbage_count; i++) {
         void *block = garbage_list[i];
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         subzone->mark_local_garbage(q);
     }
 }
 //
 // scan_marked_blocks
 //
 // scans every block in _tlcBuffer that should_scan_local_block() reports as needing a scan
 //
 void ThreadLocalCollector::scan_marked_blocks() {
     size_t index = 0;
     while (index < _tlcBufferCount) {
         void *block = _tlcBuffer[index++];
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         if (subzone->should_scan_local_block(q)) {
             scan_local_block(subzone, q, block);
         }
     }
 }
    //
    // process_local_garbage
    //
    void ThreadLocalCollector::process_local_garbage(void (*garbage_list_handler)(ThreadLocalCollector *)) {
        // _tlcBuffer currently holds the marked (reachable) blocks; reset it below and reuse it to gather the garbage blocks.
        usword_t garbage_count = _localBlocks.count() - _tlcBufferCount;
        if (garbage_count == 0) {
            // no garbage
            // TODO:  if we keep hitting this condition, we could use feedback to increase the thread local threshold.
            _localBlocks.clearFlags();    // clears flags only.
            GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)_zone, 0ull, 0ull, _localBlocks.count(), (uint64_t)(-1));
            return;
        }

        _tlcBufferCount = 0;
        size_t scavenged_size = 0; 
        
        // use the mark bit in _localBlocks to generate a garbage list in _tlcBuffer/_tlcBufferCount
        for (uint32_t i = _localBlocks.firstOccupiedSlot(), last = _localBlocks.lastOccupiedSlot(); (i <= last) && (_tlcBufferCount != garbage_count); i++) {
            void *block = _localBlocks.unmarkedPointerAtIndex(i);
            if (block) {
                Subzone *subzone = Subzone::subzone(block);
                usword_t q = subzone->quantum_index_unchecked(block);
                if (subzone->is_thread_local(q)) {
                    scavenged_size += subzone->size(q);
                    append_block(block);
                    _localBlocks.remove(i);
                } else {
                    auto_error(_zone, "not thread local garbage", (const void *)block);
                }
            }
        }
#ifdef MEASURE_TLC_STATS
        _zone->statistics().add_local_collected(_tlcBufferCount);
#endif
        
        // clear the marks & compact. must be done before evict_local_garbage(), which does more marking.
        // if the thread is not suspended then we can also possibly shrink the locals list size
        // if the thread IS suspended then we must not allocate
        if (_thread.suspended())
            _localBlocks.clearFlagsRehash();
        else
            _localBlocks.clearFlagsCompact();
        
        AUTO_PROBE(auto_probe_end_local_scan(_tlcBufferCount, &_tlcBuffer[0]));

        garbage_list_handler(this);

        // skip computing the locals size if the probe is not enabled
        if (GARBAGE_COLLECTION_COLLECTION_PHASE_END_ENABLED())
            GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)_zone, garbage_count, (uint64_t)scavenged_size, _localBlocks.count(), (uint64_t)_localBlocks.localsSize());
    }
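
// A self-contained sketch of the gather step in process_local_garbage: after
// marking, any local block that did not get marked is garbage; it is moved out
// of the local set into the garbage buffer, and the mark flags are cleared for
// the next collection. LocalSet and LocalBlock are made-up stand-ins for
// _localBlocks and its entries, not the real libauto structures.
#include <cstdio>
#include <vector>

struct LocalBlock {
    const char *name;
    bool marked;               // set by the scan phase when the block is reachable
};

struct LocalSet {
    std::vector<LocalBlock*> slots;

    // move unmarked (garbage) blocks into 'garbage', keep the marked ones,
    // and clear the mark flags (the clearFlags/compact analogue)
    void collect_garbage(std::vector<LocalBlock*> &garbage) {
        std::vector<LocalBlock*> survivors;
        for (LocalBlock *block : slots) {
            if (block->marked) {
                block->marked = false;
                survivors.push_back(block);
            } else {
                garbage.push_back(block);    // the append_block analogue
            }
        }
        slots.swap(survivors);
    }
};

int main() {
    LocalBlock a = {"a", true}, b = {"b", false}, c = {"c", true};
    LocalSet locals = {{ &a, &b, &c }};
    std::vector<LocalBlock*> garbage;
    locals.collect_garbage(garbage);
    std::printf("garbage: %zu, survivors: %zu\n", garbage.size(), locals.slots.size());  // 1, 2
}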
    //
    // scavenge_local
    //
    // we can't return these blocks to the general pool because the general collector
    // thread may still be scanning us.  Instead we return them to our thread-local cache.
    //
    void ThreadLocalCollector::scavenge_local(size_t count, void *garbage[]) {
        size_t blocks_freed = 0;
        size_t bytes_freed = 0;
        size_t bytes_dropped = 0;
        
        // if collection checking is on then clear the check count for all the garbage blocks
        Zone *zone = _thread.zone();
        if (zone->collection_checking_enabled()) {
            zone->clear_garbage_checking_count(garbage, count);
        }
        
        GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE);
        for (size_t index = 0; index < count; index++) {
            void *block = garbage[index];
            // Only small quantum blocks are currently allocated locally, take advantage of that.
            Subzone *subzone = Subzone::subzone(block);
            usword_t q = subzone->quantum_index_unchecked(block);
            if (!subzone->has_refcount(q)) {
                blocks_freed++;
                size_t block_size = subzone->size(q);
                if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(_zone), uintptr_t(block), 0, 0, 0);
                if (!_thread.thread_cache_add(block, subzone, q)) {
                    // drop the block on the floor and leave it for the heap collector to find
                    subzone->allocate(q, subzone->length(q), AUTO_UNSCANNED, false, false);
                    bytes_dropped += block_size;
                } else {
                    bytes_freed += block_size;
                }
            } else {
                SubzoneBlockRef ref(subzone, q);
                if (!is_zombie(block)) {
                    _zone->handle_overretained_garbage(block, ref.refcount(), ref.layout());
                } else {
                    // transition the block from local garbage to retained global
                    SpinLock lock(subzone->admin()->lock()); // zombify_internal requires we hold the admin lock
                    subzone->allocate(q, subzone->length(q), subzone->layout(q), true, false);
                    _zone->zombify_internal(ref);
                }
            }
        }
        if (bytes_dropped) {
            _zone->adjust_allocation_counter(bytes_dropped);
        }
        GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)blocks_freed, (uint64_t)bytes_freed);
    }
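
// A hedged sketch of the "return data to our cache" idea in scavenge_local:
// freed thread-local blocks go onto a per-thread free list instead of back to
// the shared pool, so the heap collector can keep scanning shared structures
// undisturbed, and later allocations on the same thread reuse them first.
// ThreadCache is a made-up illustration, not the real Thread::thread_cache_add
// machinery; the fixed capacity stands in for the case where the cache refuses
// a block and it is left for the heap collector instead.
#include <cstdio>
#include <cstdlib>
#include <vector>

struct ThreadCache {
    std::vector<void*> free_list;
    size_t capacity;

    explicit ThreadCache(size_t cap) : capacity(cap) {}

    // returns false when the cache is full, mirroring the "drop the block on
    // the floor" branch above
    bool add(void *block) {
        if (free_list.size() >= capacity) return false;
        free_list.push_back(block);
        return true;
    }

    // allocation fast path: reuse a cached block if one is available
    void *take() {
        if (free_list.empty()) return nullptr;
        void *block = free_list.back();
        free_list.pop_back();
        return block;
    }
};

int main() {
    ThreadCache cache(2);
    void *a = std::malloc(32), *b = std::malloc(32), *c = std::malloc(32);
    bool ra = cache.add(a), rb = cache.add(b), rc = cache.add(c);
    std::printf("cached: %d %d %d\n", ra, rb, rc);      // 1 1 0: third block refused
    std::printf("reused: %p\n", cache.take());          // hands back a cached block
    std::free(a); std::free(b); std::free(c);
}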