void DirtyCardQueueSet::concatenate_logs() { // Iterate over all the threads, if we find a partial log add it to // the global list of logs. Temporarily turn off the limit on the number // of outstanding buffers. int save_max_completed_queue = _max_completed_queue; _max_completed_queue = max_jint; assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); for (JavaThread* t = Threads::first(); t; t = t->next()) { DirtyCardQueue& dcq = t->dirty_card_queue(); if (dcq.size() != 0) { void **buf = t->dirty_card_queue().get_buf(); // We must NULL out the unused entries, then enqueue. for (size_t i = 0; i < t->dirty_card_queue().get_index(); i += oopSize) { buf[PtrQueue::byte_index_to_index((int)i)] = NULL; } enqueue_complete_buffer(dcq.get_buf(), dcq.get_index()); dcq.reinitialize(); } } if (_shared_dirty_card_queue.size() != 0) { enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(), _shared_dirty_card_queue.get_index()); _shared_dirty_card_queue.reinitialize(); } // Restore the completed buffer queue limit. _max_completed_queue = save_max_completed_queue; }
// Move the contents of one dirty card queue onto the shared list of
// completed buffers, then reset the queue so it can be refilled.
void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
  if (dcq.is_empty()) {
    return;  // Nothing buffered; leave the queue untouched.
  }
  BufferNode* node =
    BufferNode::make_node_from_buffer(dcq.get_buf(), dcq.get_index());
  enqueue_complete_buffer(node);
  dcq.reinitialize();
}
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) { if (Thread::current()->is_Java_thread()) { // We don't lock. It is fine to be epsilon-precise here. if (_max_completed_queue == 0 || _max_completed_queue > 0 && _n_completed_buffers >= _max_completed_queue + _completed_queue_padding) { bool b = mut_process_buffer(buf); if (b) { // True here means that the buffer hasn't been deallocated and the caller may reuse it. return true; } } } // The buffer will be enqueued. The caller will have to get a new one. enqueue_complete_buffer(buf); return false; }
// Take one completed buffer (if the queue holds more than stop_at) and apply
// cl to it.  Returns true iff a buffer was obtained; a partially processed
// buffer is returned to the queue, which must not happen during a pause.
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                          uint worker_i,
                                                          size_t stop_at,
                                                          bool during_pause) {
  assert(!during_pause || stop_at == 0,
         "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;  // Queue drained down to stop_at; nothing to do.
  }
  bool fully_consumed = apply_closure_to_buffer(cl, nd, true, worker_i);
  if (fully_consumed) {
    assert_fully_consumed(nd, buffer_size());
    // Done with fully processed buffer.
    deallocate_buffer(nd);
    Atomic::inc(&_processed_buffers_rs_thread);
  } else {
    // Return partially processed buffer to the queue.
    guarantee(!during_pause, "Should never stop early");
    enqueue_complete_buffer(nd);
  }
  return true;
}
// Apply cl to the buffer held by nd.  A fully consumed buffer is freed and
// true is returned (caller moves on to the next buffer); a partially
// consumed one is requeued and false is returned.
bool DirtyCardQueueSet::
apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
                                         int worker_i,
                                         BufferNode* nd) {
  if (nd == NULL) {
    return false;  // No buffer available.
  }
  void** buf = BufferNode::make_buffer_from_node(nd);
  size_t index = nd->index();
  bool consumed =
    DirtyCardQueue::apply_closure_to_buffer(cl, buf, index, _sz, true, worker_i);
  if (consumed) {
    deallocate_buffer(buf);  // Fully processed; release the storage.
    return true;             // In normal case, go on to next buffer.
  }
  enqueue_complete_buffer(buf, index);  // Partially processed; put it back.
  return false;
}