void SharedHeap::fill_region_with_object(MemRegion mr) {
  // Disable allocation events, since this isn't a "real" allocation.
  JVMPIAllocEventDisabler dis;
  size_t word_size = mr.word_size();
  size_t aligned_array_header_size =
    align_object_size(typeArrayOopDesc::header_size(T_INT));

  if (word_size >= aligned_array_header_size) {
    const size_t array_length =
      pointer_delta(mr.end(), mr.start()) -
      typeArrayOopDesc::header_size(T_INT);
    const size_t array_length_words =
      array_length * (HeapWordSize/sizeof(jint));
    post_allocation_setup_array(Universe::intArrayKlassObj(),
                                mr.start(),
                                mr.word_size(),
                                (int)array_length_words);
#ifdef ASSERT
    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
    Memory::set_words(elt_words, array_length, 0xDEAFBABE);
#endif
  } else {
    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
    post_allocation_setup_obj(SystemDictionary::object_klass(),
                              mr.start(),
                              mr.word_size());
  }
}
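// --- Hedged sketch: the filler-array arithmetic above, modeled standalone. ---
// Note the naming above: `array_length` is a word count (pointer_delta returns
// words), while `array_length_words` is actually the jint element count. This
// minimal sketch (not HotSpot code) shows the same arithmetic with assumed
// constants: HeapWordSize == 8 and a 2-word int-array header; real header
// sizes vary with VM configuration.
#include <cassert>
#include <cstddef>
#include <cstdio>

const size_t kHeapWordSize = 8;        // bytes per HeapWord (assumed)
const size_t kIntArrayHeaderWords = 2; // stand-in for typeArrayOopDesc::header_size(T_INT)

// Number of jint elements needed so that header + elements == word_size words.
size_t filler_int_array_length(size_t word_size) {
  assert(word_size >= kIntArrayHeaderWords);
  const size_t payload_words = word_size - kIntArrayHeaderWords;
  return payload_words * (kHeapWordSize / sizeof(int)); // 2 jints per word on 64-bit
}

int main() {
  // A 10-word region: 2 header words + 8 payload words -> 16 jint elements.
  printf("length = %zu\n", filler_int_array_length(10)); // prints 16
  return 0;
}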
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Memory::set_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}
G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
                                                   size_t init_word_size) :
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr(" "
                           " rs.base(): " INTPTR_FORMAT
                           " rs.size(): " INTPTR_FORMAT
                           " rs end(): " INTPTR_FORMAT,
                           rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr(" "
                           " _vs.low_boundary(): " INTPTR_FORMAT
                           " _vs.high_boundary(): " INTPTR_FORMAT,
                           _vs.low_boundary(), _vs.high_boundary());
  }
}
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.
  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}
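// --- Hedged sketch: why initialize() retreats `end` by filler_header_size. ---
// A toy LAB (not the HotSpot class; all names here are illustrative): bump
// allocation stops filler_header_size words short of the real end, so a flush
// can always install a filler object covering [top, real_end), no matter how
// full the lab got. Constants are assumptions for illustration.
#include <cassert>
#include <cstddef>

const size_t kFillerHeaderWords = 2;  // stand-in for filler_header_size

struct ToyLab {
  size_t bottom, top, end, real_end;  // word indices rather than pointers

  void init(size_t lo, size_t hi) {
    bottom = top = lo;
    real_end = hi;
    assert(hi - lo >= kFillerHeaderWords && "lab is too small");
    end = hi - kFillerHeaderWords;    // reserve room for the filler
  }
  bool allocate(size_t words) {       // bump-pointer allocation
    if (top + words > end) return false;
    top += words;
    return true;
  }
  size_t flush_filler_words() const { // words the flush-time filler must cover
    return real_end - top;            // >= kFillerHeaderWords by construction
  }
};

int main() {
  ToyLab lab;
  lab.init(0, 100);
  while (lab.allocate(7)) { /* fill the lab */ }
  assert(lab.flush_filler_words() >= kFillerHeaderWords);
  return 0;
}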
BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr(" "
                           " rs.base(): " INTPTR_FORMAT
                           " rs.size(): " INTPTR_FORMAT
                           " rs end(): " INTPTR_FORMAT,
                           rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr(" "
                           " _vs.low_boundary(): " INTPTR_FORMAT
                           " _vs.high_boundary(): " INTPTR_FORMAT,
                           _vs.low_boundary(), _vs.high_boundary());
  }
}
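// --- Hedged sketch: a plausible model of the compute_size() arithmetic. ---
// compute_size() is not shown above; this standalone model assumes the usual
// block-offset-table shape of one u_char per card, with 512-byte cards (64
// heap words on a 64-bit VM), rounded up to a reservation granularity. The
// constants and rounding are illustrative assumptions, not HotSpot's values.
#include <cstddef>
#include <cstdio>

const size_t kWordsPerCard = 64;  // 512-byte card / 8-byte word (assumed)
const size_t kPageSize = 4096;    // assumed reservation granularity

size_t offset_array_bytes(size_t reserved_words) {
  const size_t raw = (reserved_words + kWordsPerCard - 1) / kWordsPerCard;
  return (raw + kPageSize - 1) / kPageSize * kPageSize;  // align up to a page
}

int main() {
  // A 1 GiB heap is 128M words; its offset array is 2 MiB under this model.
  printf("%zu bytes\n", offset_array_bytes(128u * 1024 * 1024));
  return 0;
}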
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
  jbyte* cur_entry = _ct->byte_for(mr.last());
  const jbyte* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }

      // fast forward through potential continuous whole-word range
      // of clean cards beginning at a word-boundary
      if (is_word_aligned(cur_entry)) {
        jbyte* cur_row = cur_entry - BytesPerWord;
        while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
          cur_row -= BytesPerWord;
        }
        cur_entry = cur_row + BytesPerWord;
        cur_hw = _ct->addr_for(cur_entry);
      }

      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}
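// --- Hedged sketch: the word-at-a-time "clean row" skip used above. ---
// CardTableRS::clean_card_row() evidently packs the clean-card byte into every
// byte of a word so that eight card entries can be tested per comparison.
// This standalone model (all names here are illustrative, not HotSpot's)
// scans leftward a word at a time from word-aligned `cur` and returns the
// lowest address r such that every byte in [r, cur) is clean.
#include <cassert>
#include <cstdint>
#include <cstring>

const uint8_t kCleanCard = 0xFF;  // stand-in for CardTableRS::clean_card_val()

uint64_t clean_card_row() {       // kCleanCard replicated into all 8 bytes
  return ~UINT64_C(0) / 0xFF * kCleanCard;
}

const uint8_t* skip_clean_words_left(const uint8_t* cur, const uint8_t* limit) {
  const uint8_t* row = cur - sizeof(uint64_t);
  while (row >= limit) {
    uint64_t w;
    memcpy(&w, row, sizeof w);    // avoids unaligned/aliasing pitfalls
    if (w != clean_card_row()) break;
    row -= sizeof(uint64_t);
  }
  return row + sizeof(uint64_t);
}

int main() {
  alignas(8) uint8_t cards[32];
  memset(cards, kCleanCard, sizeof cards);
  cards[3] = 0;                               // one dirty card near the left end
  const uint8_t* p = skip_clean_words_left(cards + 24, cards);
  assert(p == cards + 8);                     // the word holding cards[3] stops the scan
  return 0;
}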
// Simply mangle the MemRegion mr.
void SpaceMangler::mangle_region(MemRegion mr) {
  assert(ZapUnusedHeapArea, "Mangling should not be in use");
#ifdef ASSERT
  if (TraceZapUnusedHeapArea) {
    gclog_or_tty->print("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")",
                        p2i(mr.start()), p2i(mr.end()));
  }
  Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
  if (TraceZapUnusedHeapArea) {
    gclog_or_tty->print_cr(" done");
  }
#endif
}
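// --- Hedged sketch: the debug-only word fill, modeled standalone. ---
// Copy::fill_to_words writes `count` HeapWords of a poison pattern; a plain
// std::fill_n over uintptr_t is a reasonable stand-in. 0xBAADBABE is used
// here as an assumed placeholder for the VM-internal badHeapWord value.
#include <algorithm>
#include <cassert>
#include <cstdint>

const uintptr_t kBadHeapWord = 0xBAADBABE;

void fill_words(uintptr_t* start, size_t count) {
  std::fill_n(start, count, kBadHeapWord);
}

int main() {
  uintptr_t buf[16] = {0};
  fill_words(buf, 16);
  for (uintptr_t w : buf) assert(w == kBadHeapWord);  // region fully mangled
  return 0;
}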
bool ParMarkBitMap::initialize(MemRegion covered_region) {
  const idx_t bits = bits_required(covered_region);
  // The bits will be divided evenly between two bitmaps; each of them should be
  // an integral number of words.
  assert(bits % (BitsPerWord * 2) == 0, "region size unaligned");

  const size_t words = bits / BitsPerWord;
  const size_t raw_bytes = words * sizeof(idx_t);
  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(bytes, rs_align, rs_align > 0);
  os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
                       rs.base(), rs.size());

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  _virtual_space = new PSVirtualSpace(rs, page_sz);
  if (_virtual_space != NULL && _virtual_space->expand_by(bytes)) {
    _region_start = covered_region.start();
    _region_size = covered_region.word_size();
    idx_t* map = (idx_t*)_virtual_space->reserved_low_addr();
    _beg_bits.set_map(map);
    _beg_bits.set_size(bits / 2);
    _end_bits.set_map(map + words / 2);
    _end_bits.set_size(bits / 2);
    return true;
  }

  _region_start = 0;
  _region_size = 0;
  if (_virtual_space != NULL) {
    delete _virtual_space;
    _virtual_space = NULL;
    // Release memory reserved in the space.
    rs.release();
  }
  return false;
}
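// --- Hedged sketch: sizing and splitting the two mark bitmaps. ---
// Assuming one bit per heap word in each of the two bitmaps (the real VM maps
// one bit per minimum-object-alignment granule), this standalone model shows
// the arithmetic above: total bits for both maps, the backing word count, and
// the offset at which the "end" bitmap begins inside the shared mapping
// (map + words / 2). kBitsPerWord matches a 64-bit build.
#include <cassert>
#include <cstddef>

const size_t kBitsPerWord = 64;

struct BitmapLayout {
  size_t total_bits;  // for both bitmaps together
  size_t words;       // backing words for the whole mapping
  size_t end_offset;  // word offset of the second ("end") bitmap
};

BitmapLayout layout_for(size_t region_words) {
  BitmapLayout l;
  l.total_bits = region_words * 2;                  // bits_required()
  assert(l.total_bits % (kBitsPerWord * 2) == 0 && "region size unaligned");
  l.words = l.total_bits / kBitsPerWord;
  l.end_offset = l.words / 2;                       // _end_bits starts here
  return l;
}

int main() {
  BitmapLayout l = layout_for(1024);                // a 1024-word region
  assert(l.total_bits == 2048 && l.words == 32 && l.end_offset == 16);
  return 0;
}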
inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
  obj->oop_iterate(_cm_oop_closure, mr);
  return mr.word_size();
}
inline ParMarkBitMap::idx_t
ParMarkBitMap::bits_required(MemRegion covered_region) {
  return bits_required(covered_region.word_size());
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
}