Example #1
    //
    // scavenge_local
    //
    // We can't return garbage blocks to the general pool because the collector
    // thread may still be scanning us. Instead we return them to our
    // thread-local cache.
    //
    void ThreadLocalCollector::scavenge_local(size_t count, vm_address_t garbage[]) {
        size_t blocks_freed = 0;
        size_t bytes_freed = 0;
        GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE);
        for (size_t index = 0; index < count; index++) {
            void *ptr = reinterpret_cast<void*>(garbage[index]);
            // Only small quantum blocks are currently allocated locally, take advantage of that.
            Subzone *subzone = Subzone::subzone(ptr);
            usword_t q = subzone->quantum_index_unchecked(ptr, allocate_quantum_small_log2);
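            // A nonzero refcount on a garbage block means it was retained after
            // being flagged as garbage; such blocks are reported, not freed.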
            if (!subzone->has_refcount(q)) {
                blocks_freed++;
                bytes_freed += subzone->size(q);
                if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(_zone), uintptr_t(ptr), 0, 0, 0);
                _thread.thread_cache_add(ptr);
            } else {
                _zone->handle_overretained_garbage(ptr, _zone->block_refcount(ptr));
                // make_global ???
            }
        }
        GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)blocks_freed, (uint64_t)bytes_freed);

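        // These statistics are shared across threads, so bump them with
        // atomic adds (__sync_add_and_fetch is a GCC atomic builtin).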
        __sync_add_and_fetch(&_zone->stats.thread_collections_total, 1);
        __sync_add_and_fetch(&_zone->stats.thread_blocks_recovered_total, blocks_freed);
        __sync_add_and_fetch(&_zone->stats.thread_bytes_recovered_total, bytes_freed);

#if DEBUG
        __sync_add_and_fetch(&blocks_scavenged_locally, blocks_freed);
        __sync_add_and_fetch(&bytes_scavenged_locally, bytes_freed);
#endif
    }
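The header comment says freed blocks go back to a per-thread cache rather than the shared pool. As a rough illustration, here is a minimal sketch of such a cache, assuming a LIFO free list threaded through the freed blocks themselves; the names LocalBlockCache, push, and pop are hypothetical and not part of libauto.

    // Hypothetical per-thread cache sketch: a singly linked free list whose
    // links are stored in the first word of each freed block. Only the owning
    // thread touches it, so no locking is needed and the global collector can
    // keep scanning without racing against the free.
    struct LocalBlockCache {
        void *head = nullptr;
        void push(void *block) {
            *reinterpret_cast<void **>(block) = head;  // link through the block
            head = block;
        }
        void *pop() {                                  // reuse on next local allocation
            void *block = head;
            if (block) head = *reinterpret_cast<void **>(block);
            return block;
        }
    };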
Example #2
    //
    // scavenge_local
    //
    // We can't return garbage blocks to the general pool because the collector
    // thread may still be scanning us. Instead we return them to our
    // thread-local cache.
    //
    void ThreadLocalCollector::scavenge_local(size_t count, void *garbage[]) {
        size_t blocks_freed = 0;
        size_t bytes_freed = 0;
        size_t bytes_dropped = 0;
        
        // if collection checking is on, clear the check count for all the garbage blocks
        Zone *zone = _thread.zone();
        if (zone->collection_checking_enabled()) {
            zone->clear_garbage_checking_count(garbage, count);
        }
        
        GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE);
        for (size_t index = 0; index < count; index++) {
            void *block = garbage[index];
            // Only small quantum blocks are currently allocated locally, take advantage of that.
            Subzone *subzone = Subzone::subzone(block);
            usword_t q = subzone->quantum_index_unchecked(block);
            if (!subzone->has_refcount(q)) {
                blocks_freed++;
                size_t block_size = subzone->size(q);
                if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(_zone), uintptr_t(block), 0, 0, 0);
                if (!_thread.thread_cache_add(block, subzone, q)) {
                    // drop the block on the floor and leave it for the heap collector to find
                    subzone->allocate(q, subzone->length(q), AUTO_UNSCANNED, false, false);
                    bytes_dropped += block_size;
                } else {
                    bytes_freed += block_size;
                }
            } else {
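                // The block still has a refcount: report it as over-retained,
                // unless it is already a zombie, in which case keep it alive globally.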
                SubzoneBlockRef ref(subzone, q);
                if (!is_zombie(block)) {
                    _zone->handle_overretained_garbage(block, ref.refcount(), ref.layout());
                } else {
                    // transition the block from local garbage to retained global
                    SpinLock lock(subzone->admin()->lock()); // zombify_internal requires we hold the admin lock
                    subzone->allocate(q, subzone->length(q), subzone->layout(q), true, false);
                    _zone->zombify_internal(ref);
                }
            }
        }
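        // Dropped blocks remain allocated until the heap collector finds them,
        // so account for their bytes in the zone's allocation counter.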
        if (bytes_dropped) {
            _zone->adjust_allocation_counter(bytes_dropped);
        }
        GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)blocks_freed, (uint64_t)bytes_freed);
    }
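Unlike the first version, this thread_cache_add() can refuse a block, and the function then leaves it allocated for the heap collector to reclaim. A minimal sketch of a bounded cache with that contract might look like the following; the cap and all names here are assumptions for illustration, not libauto's actual cache, which is keyed by quantum size class.

    #include <cstddef>

    // Hypothetical bounded cache: push() refuses blocks once kMaxCached is
    // reached, mirroring the bool return of thread_cache_add() above. On a
    // false return the caller takes the "drop it on the floor" path and the
    // heap collector reclaims the block later.
    struct BoundedBlockCache {
        static const std::size_t kMaxCached = 64;  // assumed cap, for illustration
        void *head = nullptr;
        std::size_t depth = 0;
        bool push(void *block) {
            if (depth == kMaxCached) return false; // full: caller falls back
            *reinterpret_cast<void **>(block) = head;
            head = block;
            ++depth;
            return true;
        }
    };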