// Assumes _tlcBuffer/_tlcBufferCount hold the garbage list void ThreadLocalCollector::finalize_local_garbage_later(ThreadLocalCollector *tlc) { size_t garbage_count = tlc->_tlcBufferCount; tlc->evict_local_garbage(); // note this modifies _tlcBuffer/_tlcBufferCount mark_local_garbage(tlc->_tlcBuffer, garbage_count); Zone *z = tlc->_zone; void **garbage_copy = (void **)aux_malloc(garbage_count * sizeof(void *)); memcpy(garbage_copy, tlc->_tlcBuffer, garbage_count * sizeof(void *)); dispatch_async(tlc->_zone->_collection_queue, ^{ finalize_work(z, garbage_count, garbage_copy); });
bool Monitor::is_object(void *ptr, long size) { if (_class_list) { int count = _class_list(NULL, 0); if (count > _class_count) { void **buffer = (void**) aux_malloc(count * sizeof(void*)); int new_count = _class_list(buffer, count); while (new_count > count) { count = new_count; buffer = (void**) aux_realloc(buffer, count * sizeof(void*)); new_count = _class_list(buffer, count); } _class_count = count; for (int i = 0; i < count; i++) ptr_set_add(_class_set, buffer[i]); aux_free(buffer); } // XXX_PCB shouldn't be hard coding this! objc_class_header *isa = *(objc_class_header**)ptr; return isa && ptr_set_is_member(_class_set, isa) && (size >= isa->instance_size); } return false; }
//
// process_local_garbage
//
// Collects all unmarked (unreachable) blocks in the thread-local set into a
// contiguous garbage_list, then either finalizes them synchronously
// (finalizeNow) or ships the list to the collection queue for deferred work.
// NOTE(review): the function's closing brace lies outside this chunk.
void ThreadLocalCollector::process_local_garbage(bool finalizeNow) {
    // Gather the garbage blocks into a contiguous data structure that can be passed to Zone::invalidate_garbage / free_garbage.
    // TODO: revisit this when we change the collector to use bitmaps to represent garbage lists.
    usword_t garbage_count = _localBlocks.count() - _markedBlocksCounter;
    if (garbage_count == 0) {
        // Nothing died this cycle; just reset the mark bits and report.
        // TODO: if we keep hitting this condition, we could use feedback to increase the thread local threshold.
        _localBlocks.clearFlags(); // clears flags only.
        GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)_zone, 0ull, 0ull, _localBlocks.count(), (uint64_t)(-1));
        return;
    }
    // Flexible-array allocation: header + one vm_address_t slot per garbage block.
    garbage_list *list = (garbage_list *)aux_malloc(sizeof(garbage_list) + garbage_count * sizeof(vm_address_t));
    list->count = 0;
    list->zone = _zone;
    size_t list_count = 0;
    // Byte totals are only tallied when the dtrace COLLECTION_END probe is live.
    size_t remaining_size = 0, scavenged_size = 0;
    for (uint32_t i = _localBlocks.firstOccupiedSlot(), last = _localBlocks.lastOccupiedSlot(); i <= last; i++) {
        // unmarkedPointerAtIndex() yields only blocks the scan did NOT reach,
        // i.e. the garbage candidates.
        void *block = _localBlocks.unmarkedPointerAtIndex(i);
        if (block) {
            Subzone *subzone = Subzone::subzone(block);
            if (subzone->is_thread_local(block)) {
                if (GARBAGE_COLLECTION_COLLECTION_END_ENABLED()) {
                    scavenged_size += subzone->size(block);
                }
                list->garbage[list_count++] = reinterpret_cast<vm_address_t>(block);
                // Remove from the local set and flag the block itself as
                // local garbage while we are still iterating; remove(i) must
                // not disturb slot order — presumably it only vacates slot i
                // (TODO confirm against the set implementation).
                _localBlocks.remove(i);
                subzone->mark_local_garbage(block);
            } else {
                // An unmarked block that already escaped thread-locality
                // indicates a bookkeeping bug.
                auto_error(_zone, "not thread local garbage", (const void *)block);
            }
        } else if (GARBAGE_COLLECTION_COLLECTION_END_ENABLED()) {
            // Probe-only accounting: measure the surviving (marked) blocks.
            block = _localBlocks.markedPointerAtIndex(i);
            if (block) {
                Subzone *subzone = Subzone::subzone(block);
                if (subzone->is_thread_local(block)) {
                    remaining_size += subzone->size(block);
                }
            }
        }
    }
    list->count = list_count;
    // clear the marks & compact. must be done before evict_local_garbage(), which does more marking.
    _localBlocks.clearFlagsCompact();
    // if using GCD to finalize and free the garbage, we now compute the set of blocks reachable from the garbage, and cause those to be
    // evicted from the local set.
    if (!finalizeNow) evict_local_garbage(list_count, list->garbage);
    AUTO_PROBE(auto_probe_end_local_scan(list_count, list->garbage));
    if (finalizeNow) {
        // Synchronous path: finalize and scavenge here; `list` is ours to free.
        _zone->invalidate_garbage(list_count, list->garbage);
        scavenge_local(list->count, list->garbage);
        aux_free(list);
    } else {
#if USE_DISPATCH_QUEUE
        // Asynchronous path: ownership of `list` transfers to the block;
        // finalize_work() is presumably responsible for freeing it — verify.
        dispatch_async(_zone->collection_queue, ^{ finalize_work(list); });
#else
        // should never happen in pthread case.
        __builtin_trap();
#endif
    }