// Example no. 1 (listing header from the original source extraction; not code)
 //
 // evict_local_garbage
 //
 // Scans the supplied garbage blocks for references to still-live thread-local
 // blocks and evicts (promotes to global) every local block reachable from that
 // garbage, so those survivors remain valid after the garbage is reclaimed.
 //
 void ThreadLocalCollector::evict_local_garbage(size_t count, vm_address_t garbage[]) {
     // Direct marks into the local mark bits rather than an external buffer.
     set_marked_blocks_buffer(NULL);
     // scan the garbage blocks first.
     _markedBlocksCounter = 0;
     for (size_t i = 0; i < count; ++i) {
         void *block = (void*)garbage[i];
         Subzone *sz = Subzone::subzone(block);
         usword_t layout = sz->layout(block);
         // Unscanned blocks hold no pointers the collector should follow.
         if (!(layout & AUTO_UNSCANNED)) {
             Range range(block, sz->size(block));
             // AUTO_OBJECT blocks may carry an exact pointer-layout map; other
             // scanned blocks are scanned conservatively over the whole range.
             const unsigned char *map = (layout & AUTO_OBJECT) ? _zone->layout_map_for_block(block) : NULL;
             if (map)
                 scan_with_layout(range, map, NULL); // TLC doesn't need the write barrier.
             else
                 scan_range(range, NULL);
          }
     }
     
     // if no blocks were marked, then no evictions are needed.
     if (_markedBlocksCounter == 0) return;
     
     // now, mark all blocks reachable from the garbage blocks themselves.
     scan_marked_blocks();
     // Walk the occupied slot range; marked entries survived a scan from garbage.
     // NOTE(review): removal during this index-based walk assumes remove(i) does
     // not shift later slots — confirm against the _localBlocks container.
     for (uint32_t i = _localBlocks.firstOccupiedSlot(); i <= _localBlocks.lastOccupiedSlot(); i++) {
         if (!_localBlocks.validPointerAtIndex(i))
             continue;
         if (_localBlocks.wasMarked(i)) {
             void *block = _localBlocks[i];
             Subzone *subzone = Subzone::subzone(block);
             // evict this block, since it is reachable from garbage, but not itself garbage.
             subzone->make_global(block);
             _localBlocks.remove(i);
         }
     }
 }
    //
    // scavenge_local
    //
    // we can't return to the general pool because the general collector thread may
    // still be scanning us.  Instead we return data to our cache.
    //
    void ThreadLocalCollector::scavenge_local(size_t count, void *garbage[]) {
        // Reclaims thread-local garbage blocks: unreferenced blocks go to the
        // thread's cache (or are left for the heap collector), while blocks that
        // still hold a retain count are reported or zombified.
        size_t blocks_freed = 0;    // blocks successfully reclaimed (cached or dropped)
        size_t bytes_freed = 0;     // bytes returned to the thread cache
        size_t bytes_dropped = 0;   // bytes left behind for the heap collector
        
        // if collection checking is on then clear the check count for all the garbage blocks
        Zone *zone = _thread.zone();
        if (zone->collection_checking_enabled()) {
            zone->clear_garbage_checking_count(garbage, count);
        }
        
		GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE);
        for (size_t index = 0; index < count; index++) {
            void *block = garbage[index];
            // Only small quantum blocks are currently allocated locally, take advantage of that.
            Subzone *subzone = Subzone::subzone(block);
            usword_t q = subzone->quantum_index_unchecked(block);
            if (!subzone->has_refcount(q)) {
                // Unretained garbage: truly free. Log the deallocation before the
                // block's storage is recycled.
                blocks_freed++;
                size_t block_size = subzone->size(q);
                if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(_zone), uintptr_t(block), 0, 0, 0);
                if (!_thread.thread_cache_add(block, subzone, q)) {
                    // drop the block on the floor and leave it for the heap collector to find
                    subzone->allocate(q, subzone->length(q), AUTO_UNSCANNED, false, false);
                    bytes_dropped += block_size;
                } else {
                    bytes_freed += block_size;
                }
            } else {
                // Retained garbage: either an over-retain bug or a zombie.
                SubzoneBlockRef ref(subzone, q);
                if (!is_zombie(block)) {
                    _zone->handle_overretained_garbage(block, ref.refcount(), ref.layout());
                } else {
                    // transition the block from local garbage to retained global
                    SpinLock lock(subzone->admin()->lock()); // zombify_internal requires we hold the admin lock
                    subzone->allocate(q, subzone->length(q), subzone->layout(q), true, false);
                    _zone->zombify_internal(ref);
                }
            }
        }
        if (bytes_dropped) {
            // Dropped bytes are still accounted as allocated until the heap
            // collector reclaims them.
            _zone->adjust_allocation_counter(bytes_dropped);
        }
		GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)blocks_freed, (uint64_t)bytes_freed);
    }