// Parallel unlink/oops-do over the whole dedup hash table. Each worker claims
// partitions until the table is exhausted; removed-entry counts are folded into
// the shared counters under the table lock only once, at the end.
void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id) {
  // The table is divided into partitions to allow lock-less parallel processing by
  // multiple worker threads. A worker thread first claims a partition, which ensures
  // exclusive access to that part of the table, then continues to process it. To allow
  // shrinking of the table in parallel we also need to make sure that the same worker
  // thread processes all partitions where entries will hash to the same destination
  // partition. Since the table size is always a power of two and we always shrink by
  // dividing the table in half, we know that for a given partition there is only one
  // other partition whose entries will hash to the same destination partition. That
  // other partition is always the sibling partition in the second half of the table.
  // For example, if the table is divided into 8 partitions, the sibling of partition 0
  // is partition 4, the sibling of partition 1 is partition 5, etc.
  size_t table_half = _table->_size / 2;

  // Let each partition be one page worth of buckets
  size_t partition_size = MIN2(table_half, os::vm_page_size() / sizeof(G1StringDedupEntry*));
  // Holds because _size is a power of two and partition_size is either
  // table_half itself or a power-of-two page worth of buckets.
  assert(table_half % partition_size == 0, "Invalid partition size");

  // Number of entries removed during the scan (accumulated locally, flushed below)
  uintx removed = 0;

  for (;;) {
    // Grab next partition to scan
    size_t partition_begin = cl->claim_table_partition(partition_size);
    size_t partition_end = partition_begin + partition_size;
    if (partition_begin >= table_half) {
      // End of table
      break;
    }

    // Scan the partition followed by the sibling partition in the second half of the table
    removed += unlink_or_oops_do(cl, partition_begin, partition_end, worker_id);
    removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
  }

  // Delayed update to avoid contention on the table lock
  if (removed > 0) {
    MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
    _table->_entries -= removed;
    _entries_removed += removed;
  }
}
// Parallel unlink/oops-do over all dedup queues. Workers claim queues one at
// a time; a successful claim grants exclusive access to that queue, which is
// processed in full before the next claim is attempted.
void G1StringDedupQueue::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl) {
  while (true) {
    // Claim the next unprocessed queue
    size_t queue_index = cl->claim_queue();
    if (queue_index >= _queue->_nqueues) {
      // All queues have been claimed
      break;
    }
    // Process the claimed queue
    unlink_or_oops_do(cl, queue_index);
  }
}
// Remove dead entries from the dedup table/queues using the given liveness
// closure. Resizing or rehashing is explicitly disallowed here: the unlink
// pass can itself remove enough entries that any resize/rehash decision made
// up front would no longer be valid afterwards.
void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
  assert(is_enabled(), "String deduplication not enabled");
  unlink_or_oops_do(is_alive, NULL, false /* allow_resize_and_rehash */);
}
// Apply keep_alive to all oops held by the dedup table/queues. No is_alive
// closure is needed for a pure keep-alive pass, so NULL is passed.
// NOTE(review): this file also contains a definition of oops_do that forwards
// a third allow_resize_and_rehash argument — these look like two versions of
// the same function; confirm which unlink_or_oops_do overload is current.
void G1StringDedup::oops_do(OopClosure* keep_alive) {
  assert(is_enabled(), "String deduplication not enabled");
  unlink_or_oops_do(NULL, keep_alive);
}
// Apply keep_alive to all oops held by the dedup table/queues; resizing and
// rehashing are permitted during this pass.
// NOTE(review): this file also contains a two-argument forwarding variant of
// this same function — appears to be a duplicate from another version of the
// file; confirm which definition should remain.
void G1StringDedup::oops_do(OopClosure* keep_alive) {
  assert(is_enabled(), "String deduplication not enabled");
  unlink_or_oops_do(NULL, keep_alive, true /* allow_resize_and_rehash */);
}
// Unlink dead entries, reporting back how many entries were visited and how
// many were removed. No keep-alive closure is applied (NULL oops closure).
static void unlink(BoolObjectClosure* cl, int* processed, int* removed) {
  unlink_or_oops_do(cl, NULL /* no OopClosure */, processed, removed);
}
// Unlink dead entries when the caller has no use for the visited/removed
// statistics; scratch counters satisfy the underlying interface.
static void unlink(BoolObjectClosure* cl) {
  int visited = 0;
  int unlinked = 0;
  unlink_or_oops_do(cl, NULL, &visited, &unlinked);
}
// GC support // Delete pointers to otherwise-unreachable objects. static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) { int processed = 0; int removed = 0; unlink_or_oops_do(cl, f, &processed, &removed); }