Example #1
    //
    // scavenge_local
    //
    // we can't return to the general pool because the general collector thread may
    // still be scanning us.  Instead we return data to our cache.
    //
    void ThreadLocalCollector::scavenge_local(size_t count, vm_address_t garbage[]) {
        size_t blocks_freed = 0;
        size_t bytes_freed = 0;
        GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE);
        for (size_t index = 0; index < count; index++) {
            void *ptr = reinterpret_cast<void*>(garbage[index]);
            // Only small quantum blocks are currently allocated locally, take advantage of that.
            Subzone *subzone = Subzone::subzone(ptr);
            usword_t q = subzone->quantum_index_unchecked(ptr, allocate_quantum_small_log2);
            if (!subzone->has_refcount(q)) {
                blocks_freed++;
                bytes_freed += subzone->size(q);
                if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(_zone), uintptr_t(ptr), 0, 0, 0);
                _thread.thread_cache_add(ptr);
            } else {
                _zone->handle_overretained_garbage(ptr, _zone->block_refcount(ptr));
                // make_global ???
            }
        }
        GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)blocks_freed, (uint64_t)bytes_freed);

        __sync_add_and_fetch(&_zone->stats.thread_collections_total, 1);
        __sync_add_and_fetch(&_zone->stats.thread_blocks_recovered_total, blocks_freed);
        __sync_add_and_fetch(&_zone->stats.thread_bytes_recovered_total, bytes_freed);

#if DEBUG
        __sync_add_and_fetch(&blocks_scavenged_locally, blocks_freed);
        __sync_add_and_fetch(&bytes_scavenged_locally, bytes_freed);
#endif
    }
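The header comment captures the key constraint: while the heap collector may still be scanning this thread, freed blocks cannot rejoin the shared pool, so they are parked in a per-thread cache instead. Below is a minimal sketch of that idea, assuming a hypothetical fixed-capacity LocalCache; it is not libauto's actual thread_cache_add.

    #include <cstddef>

    // Hypothetical per-thread cache: freed blocks are parked here instead of
    // being returned to the shared pool, so a concurrently scanning collector
    // never sees a block recycled out from under it.
    struct LocalCache {
        static const size_t kCapacity = 64;
        void  *slots[kCapacity];
        size_t used = 0;

        // Returns false when full; the caller must then fall back to another
        // strategy (the later scavenge_local example re-marks the block and
        // leaves it for the heap collector).
        bool add(void *block) {
            if (used == kCapacity) return false;
            slots[used++] = block;
            return true;
        }

        // Allocation fast path: reuse a cached block if one is available.
        void *take() {
            return used ? slots[--used] : nullptr;
        }
    };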
Example #2
 void ThreadLocalCollector::evict_local_garbage(size_t count, vm_address_t garbage[]) {
     set_marked_blocks_buffer(NULL);
     // scan the garbage blocks first.
     _markedBlocksCounter = 0;
     for (size_t i = 0; i < count; ++i) {
         void *block = (void*)garbage[i];
         Subzone *sz = Subzone::subzone(block);
         usword_t layout = sz->layout(block);
         if (!(layout & AUTO_UNSCANNED)) {
             Range range(block, sz->size(block));
             const unsigned char *map = (layout & AUTO_OBJECT) ? _zone->layout_map_for_block(block) : NULL;
             if (map)
                 scan_with_layout(range, map, NULL); // TLC doesn't need the write barrier.
             else
                 scan_range(range, NULL);
         }
     }
     
     // if no blocks were marked, then no evictions are needed.
     if (_markedBlocksCounter == 0) return;
     
     // now, mark all blocks reachable from the garbage blocks themselves.
     scan_marked_blocks();
     for (uint32_t i = _localBlocks.firstOccupiedSlot(); i <= _localBlocks.lastOccupiedSlot(); i++) {
         if (!_localBlocks.validPointerAtIndex(i))
             continue;
         if (_localBlocks.wasMarked(i)) {
             void *block = _localBlocks[i];
             Subzone *subzone = Subzone::subzone(block);
             // evict this block, since it is reachable from garbage, but not itself garbage.
             subzone->make_global(block);
             _localBlocks.remove(i);
         }
     }
 }
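scan_with_layout walks a block with a per-type layout map rather than scanning every word conservatively. The sketch below assumes the skip/scan nibble encoding used by Objective-C ivar layouts (high nibble = pointer-free words to skip, low nibble = pointer words to scan, a zero byte terminates); whether layout_map_for_block uses exactly this format is an assumption.

    #include <cstddef>

    // Layout-driven scan: visit only the word offsets the map marks as
    // pointers, instead of every word in the block. Encoding assumed:
    // each byte holds (skip << 4) | scan word counts; 0x00 terminates.
    void scan_with_layout_sketch(void **words, const unsigned char *map,
                                 void (*visit)(void *value)) {
        size_t offset = 0;
        for (const unsigned char *p = map; *p != 0; ++p) {
            offset += (*p >> 4);                    // skip non-pointer words
            for (unsigned n = *p & 0x0F; n > 0; --n)
                visit(words[offset++]);             // visit each pointer word
        }
    }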
 inline void ThreadLocalCollector::mark_local_garbage(void **garbage_list, size_t garbage_count) {
     for (size_t i = 0; i < garbage_count; i++) {
         void *block = garbage_list[i];
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         subzone->mark_local_garbage(q);
     }
 }
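mark_local_garbage first maps the block pointer to a quantum index. The sketch below shows what quantum_index_unchecked plausibly computes when subzones are aligned regions divided into power-of-two quanta; both constants are illustrative, not libauto's real geometry.

    #include <cstdint>

    const uintptr_t kSubzoneAlignment = 0x100000;  // assumption: 1 MB aligned subzones
    const unsigned  kSmallQuantumLog2 = 4;         // assumption: 16-byte small quanta

    // A block's quantum index is its offset within the subzone divided by
    // the quantum size, done as a shift because the size is a power of two.
    uintptr_t quantum_index_sketch(const void *block) {
        uintptr_t addr = reinterpret_cast<uintptr_t>(block);
        uintptr_t base = addr & ~(kSubzoneAlignment - 1);  // cf. Subzone::subzone()
        return (addr - base) >> kSmallQuantumLog2;
    }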
Example #4
 //
 // send_block_info
 //
 // Send specific information about a block.
 //
 void Monitor::send_block_info(Zone *zone, void *block) {
     if (zone->in_subzone_memory(block)) {
         Subzone *subzone = Subzone::subzone(block);
         return send_block_info(zone, subzone, subzone->quantum_index(block), block);
     } else if (zone->in_large_memory(block)) {
         return send_block_info(zone, Large::large(block), block);
     } else {
         ASSERTION(0 && "not a block");
     }
 }
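send_block_info dispatches on where the pointer lives: subzone blocks are described by a quantum index into side tables, while large blocks conventionally keep a header directly in front of the payload. The sketch below shows that header-recovery idiom under the header-before-payload assumption; LargeHeader is hypothetical and Large::large may compute its result differently.

    #include <cstddef>

    // Hypothetical large-block header stored immediately before the payload.
    struct LargeHeader {
        size_t   size;
        unsigned layout;
        unsigned refcount;
    };

    // Recover the header by stepping back over it from the payload pointer,
    // the same shape as Large::large(block) in the example above.
    inline LargeHeader *header_of(void *block) {
        return reinterpret_cast<LargeHeader *>(block) - 1;
    }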
 //
 // scan_marked_blocks
 //
 // scans all the blocks in _tlcBuffer
 //
 void ThreadLocalCollector::scan_marked_blocks() {
     size_t index = 0;
     while (index < _tlcBufferCount) {
         void *block = _tlcBuffer[index++];
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         if (subzone->should_scan_local_block(q)) {
             scan_local_block(subzone, q, block);
         }
     }
 }
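Note the loop shape: the condition re-reads _tlcBufferCount on every iteration instead of caching it, so blocks appended to _tlcBuffer while scanning are themselves scanned before the loop ends. The same worklist idiom in isolation, with illustrative names:

    #include <cstddef>
    #include <vector>

    // Worklist traversal: processing an item may push more items, and the
    // loop condition re-reads the current size so those are processed too.
    template <typename T, typename Expand>
    void drain_worklist(std::vector<T> &work, Expand expand) {
        size_t index = 0;
        while (index < work.size()) {      // size may grow during the loop
            T item = work[index++];
            expand(item, work);            // may push_back newly found items
        }
    }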
Example #6
 //
 // scan_marked_blocks
 //
 void ThreadLocalCollector::scan_marked_blocks() {
     for (uint32_t i = _localBlocks.firstOccupiedSlot(), last = _localBlocks.lastOccupiedSlot(); i <= last; i++) {
         void *block = _localBlocks.markedUnscannedPointerAtIndex(i);
         if (block) {
             Subzone *subzone = Subzone::subzone(block);
             if (subzone->should_scan_local_block(block)) {
                 scan_local_block(block);
             }
         }
     }
 }
    //
    // process_local_garbage
    //
    void ThreadLocalCollector::process_local_garbage(void (*garbage_list_handler)(ThreadLocalCollector *)) {
        // Gather the garbage blocks into _tlcBuffer, which currently holds marked blocks.
        usword_t garbage_count = _localBlocks.count() - _tlcBufferCount;
        if (garbage_count == 0) {
            // no garbage
            // TODO:  if we keep hitting this condition, we could use feedback to increase the thread local threshold.
            _localBlocks.clearFlags();    // clears flags only.
            GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)_zone, 0ull, 0ull, _localBlocks.count(), (uint64_t)(-1));
            return;
        }

        _tlcBufferCount = 0;
        size_t scavenged_size = 0; 
        
        // use the mark bit in _localBlocks to generate a garbage list in _tlcBuffer/_tlcBufferCount
        for (uint32_t i = _localBlocks.firstOccupiedSlot(), last = _localBlocks.lastOccupiedSlot(); (i <= last) && (_tlcBufferCount != garbage_count); i++) {
            void *block = _localBlocks.unmarkedPointerAtIndex(i);
            if (block) {
                Subzone *subzone = Subzone::subzone(block);
                usword_t q = subzone->quantum_index_unchecked(block);
                if (subzone->is_thread_local(q)) {
                    scavenged_size += subzone->size(q);
                    append_block(block);
                    _localBlocks.remove(i);
                } else {
                    auto_error(_zone, "not thread local garbage", (const void *)block);
                }
            }
        }
#ifdef MEASURE_TLC_STATS
        _zone->statistics().add_local_collected(_tlcBufferCount);
#endif
        
        // clear the marks & compact. must be done before evict_local_garbage(), which does more marking.
        // if the thread is not suspended then we can also possibly shrink the locals list size
        // if the thread IS suspended then we must not allocate
        if (_thread.suspended())
            _localBlocks.clearFlagsRehash();
        else
            _localBlocks.clearFlagsCompact();
        
        AUTO_PROBE(auto_probe_end_local_scan(_tlcBufferCount, &_tlcBuffer[0]));

        garbage_list_handler(this);

        // skip computing the locals size if the probe is not enabled
        if (GARBAGE_COLLECTION_COLLECTION_PHASE_END_ENABLED())
            GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)_zone, garbage_count, (uint64_t)scavenged_size, _localBlocks.count(), (uint64_t)_localBlocks.localsSize());
    }
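Taken together this is a mark-and-sweep over the thread's local set: after marking, every still-unmarked local block is garbage, which is why garbage_count falls out as count() minus the number of marked blocks. A toy version of the sweep half, using stand-ins for _localBlocks rather than libauto's data structures:

    #include <cstddef>
    #include <vector>

    struct LocalSet {
        std::vector<void *> blocks;  // stand-in for _localBlocks
        std::vector<bool>   marked;  // stand-in for the per-slot mark bit
    };

    // Sweep: gather every unmarked block into a garbage list, mirroring how
    // process_local_garbage fills _tlcBuffer from unmarkedPointerAtIndex().
    std::vector<void *> sweep_unmarked(const LocalSet &set) {
        std::vector<void *> garbage;
        for (size_t i = 0; i < set.blocks.size(); ++i) {
            if (!set.marked[i]) garbage.push_back(set.blocks[i]);
        }
        return garbage;  // size == blocks.size() - marked count
    }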
 // Assumes _tlcBuffer/_tlcBufferCount hold the garbage list
 void ThreadLocalCollector::evict_local_garbage() {
     // scan the garbage blocks to evict all blocks reachable from the garbage list
     size_t evict_cursor = _tlcBufferCount;
     
     size_t scan_cursor = 0;
     while (scan_cursor < _tlcBufferCount) {
         void *block = _tlcBuffer[scan_cursor++];
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         if (subzone->is_scanned(q)) {
             scan_local_block(subzone, q, block);
         }
     }
     
     usword_t global_size = 0;
     while (evict_cursor < _tlcBufferCount) {
         void *block = _tlcBuffer[evict_cursor++];
         // evict this block, since it is reachable from garbage, but not itself garbage.
         Subzone *subzone = Subzone::subzone(block);
         usword_t q = subzone->quantum_index_unchecked(block);
         subzone->make_global(q);
         _localBlocks.remove(block);
         global_size += subzone->size(q);
     }
     if (global_size != 0)
         _zone->adjust_allocation_counter(global_size);
 }
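This version relies on a two-cursor trick: the garbage list occupies the front of _tlcBuffer, scanning appends every newly reached block behind that prefix, and eviction then walks exactly the appended suffix. The pattern in isolation, with illustrative names:

    #include <cstddef>
    #include <vector>

    // Two-cursor buffer reuse: [0, prefix) holds the garbage list, and
    // everything appended during scanning lands in [prefix, size()), which
    // is precisely the set of survivors that must be evicted.
    void scan_then_evict(std::vector<void *> &buffer,
                         void (*scan)(void *block, std::vector<void *> &out),
                         void (*evict)(void *block)) {
        size_t prefix = buffer.size();      // end of the garbage prefix
        for (size_t i = 0; i < buffer.size(); ++i)
            scan(buffer[i], buffer);        // may append reachable blocks
        for (size_t i = prefix; i < buffer.size(); ++i)
            evict(buffer[i]);               // only the appended suffix
    }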
    //
    // scavenge_local
    //
    // we can't return to the general pool because the general collector thread may
    // still be scanning us.  Instead we return data to our cache.
    //
    void ThreadLocalCollector::scavenge_local(size_t count, void *garbage[]) {
        size_t blocks_freed = 0;
        size_t bytes_freed = 0;
        size_t bytes_dropped = 0;
        
        // if collection checking is on then clear the check count for all the garbage blocks
        Zone *zone = _thread.zone();
        if (zone->collection_checking_enabled()) {
            zone->clear_garbage_checking_count(garbage, count);
        }
        
        GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE);
        for (size_t index = 0; index < count; index++) {
            void *block = garbage[index];
            // Only small quantum blocks are currently allocated locally, take advantage of that.
            Subzone *subzone = Subzone::subzone(block);
            usword_t q = subzone->quantum_index_unchecked(block);
            if (!subzone->has_refcount(q)) {
                blocks_freed++;
                size_t block_size = subzone->size(q);
                if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(_zone), uintptr_t(block), 0, 0, 0);
                if (!_thread.thread_cache_add(block, subzone, q)) {
                    // drop the block on the floor and leave it for the heap collector to find
                    subzone->allocate(q, subzone->length(q), AUTO_UNSCANNED, false, false);
                    bytes_dropped += block_size;
                } else {
                    bytes_freed += block_size;
                }
            } else {
                SubzoneBlockRef ref(subzone, q);
                if (!is_zombie(block)) {
                    _zone->handle_overretained_garbage(block, ref.refcount(), ref.layout());
                } else {
                    // transition the block from local garbage to retained global
                    SpinLock lock(subzone->admin()->lock()); // zombify_internal requires we hold the admin lock
                    subzone->allocate(q, subzone->length(q), subzone->layout(q), true, false);
                    _zone->zombify_internal(ref);
                }
            }
        }
        if (bytes_dropped) {
            _zone->adjust_allocation_counter(bytes_dropped);
        }
        GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)_zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)blocks_freed, (uint64_t)bytes_freed);
    }
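The bytes_dropped adjustment keeps the heap collector's pacing accurate: a dropped block becomes live global memory again, so its bytes must be re-added to the counter that drives collection thresholds. The sketch below shows that style of accounting with a plain atomic counter; treating adjust_allocation_counter this way is an assumption, not libauto's actual implementation.

    #include <atomic>
    #include <cstddef>

    // Hypothetical pacing counter: allocations add to it, and a collection
    // is requested once the running total crosses a threshold.
    std::atomic<size_t> allocated_since_collection{0};
    const size_t kCollectionThreshold = 4 * 1024 * 1024;  // illustrative: 4 MB

    bool note_allocation(size_t bytes) {
        // Blocks handed back to the heap collector (the bytes_dropped case)
        // are accounted exactly like fresh allocations.
        return allocated_since_collection.fetch_add(bytes) + bytes >= kCollectionThreshold;
    }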
Example #10
    //
    // process_local_garbage
    //
    void ThreadLocalCollector::process_local_garbage(bool finalizeNow) {
        // Gather the garbage blocks into a contiguous data structure that can be passed to Zone::invalidate_garbage / free_garbage.
        // TODO:  revisit this when we change the collector to use bitmaps to represent garbage lists.
        usword_t garbage_count = _localBlocks.count() - _markedBlocksCounter;
        if (garbage_count == 0) {
            // TODO:  if we keep hitting this condition, we could use feedback to increase the thread local threshold.
            _localBlocks.clearFlags();    // clears flags only.
            GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)_zone, 0ull, 0ull, _localBlocks.count(), (uint64_t)(-1));
            return;
        }

        garbage_list *list = (garbage_list *)aux_malloc(sizeof(garbage_list) + garbage_count * sizeof(vm_address_t));
        list->count = 0;
        list->zone = _zone;
        size_t list_count = 0;
        size_t remaining_size = 0, scavenged_size = 0;
        
        for (uint32_t i = _localBlocks.firstOccupiedSlot(), last = _localBlocks.lastOccupiedSlot(); i <= last; i++) {
            void *block = _localBlocks.unmarkedPointerAtIndex(i);
            if (block) {
                Subzone *subzone = Subzone::subzone(block);
                if (subzone->is_thread_local(block)) {
                    if (GARBAGE_COLLECTION_COLLECTION_END_ENABLED()) {
                        scavenged_size += subzone->size(block);
                    }
                    list->garbage[list_count++] = reinterpret_cast<vm_address_t>(block);
                    _localBlocks.remove(i);
                    subzone->mark_local_garbage(block);
                } else {
                    auto_error(_zone, "not thread local garbage", (const void *)block);
                }
            } else if (GARBAGE_COLLECTION_COLLECTION_END_ENABLED()) {
                block = _localBlocks.markedPointerAtIndex(i);
                if (block) {
                    Subzone *subzone = Subzone::subzone(block);
                    if (subzone->is_thread_local(block)) {
                        remaining_size += subzone->size(block);
                    }
                }
            }
        }
        list->count = list_count;
        
        // clear the marks & compact. must be done before evict_local_garbage(), which does more marking.
        _localBlocks.clearFlagsCompact();

        // if using GCD to finalize and free the garbage, we now compute the set of blocks reachable from the garbage, and cause those to be
        // evicted from the local set.
        if (!finalizeNow) evict_local_garbage(list_count, list->garbage);

        AUTO_PROBE(auto_probe_end_local_scan(list_count, list->garbage));
        
        if (finalizeNow) {
            _zone->invalidate_garbage(list_count, list->garbage);
            scavenge_local(list->count, list->garbage);
            aux_free(list);
        } else {
#if USE_DISPATCH_QUEUE
            dispatch_async(_zone->collection_queue, ^{ finalize_work(list); });
#else
            // should never happen in pthread case.
            __builtin_trap();
#endif
        }
    }
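In the async branch, ownership of the heap-allocated list transfers with the Block: dispatch_async copies the Block, which captures the list pointer, and the callback is then responsible for freeing it. A standalone sketch of that handoff with an illustrative work_list type (this uses Apple's Blocks extension, like the code above):

    #include <dispatch/dispatch.h>
    #include <cstdlib>

    struct work_list {          // illustrative stand-in for garbage_list
        size_t count;
        void **items;
    };

    static void finalize_work_sketch(work_list *list) {
        // ... finalize each of list->items[0 .. count) ...
        std::free(list->items);
        std::free(list);        // the callback owns the list and releases it
    }

    void hand_off(dispatch_queue_t queue, work_list *list) {
        // The Block captures the list pointer; ownership moves to the queue,
        // the same shape as dispatch_async(collection_queue, ^{ finalize_work(list); }).
        dispatch_async(queue, ^{ finalize_work_sketch(list); });
    }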