// Grow this downward-growing space by stealing space from the adjacent,
// upward-growing `other_space` that sits immediately below it in the address
// range.  Space is taken in three steps, cheapest first:
//   1. this space's own uncommitted region,
//   2. other_space's uncommitted region (which must then be committed),
//   3. other_space's already-committed region.
// Returns the number of bytes actually obtained: 0 if step 1 fails,
// `bytes - bytes_needed` (a partial amount) if the commit in step 2 fails,
// or the full `bytes` otherwise.  NOTE(review): step 3 does not clamp
// tmp_bytes to other_space's committed size — callers presumably guarantee
// the total request fits; confirm at call sites.
size_t PSVirtualSpaceHighToLow::expand_into(PSVirtualSpace* other_space, size_t bytes) {
  assert(is_aligned(bytes), "arg not aligned");
  assert(grows_down(), "this space must grow down");
  assert(other_space->grows_up(), "other space must grow up");
  // The two spaces must share a boundary: our low end is their high end.
  assert(reserved_low_addr() == other_space->reserved_high_addr(),
         "spaces not contiguous");
  assert(special() == other_space->special(),
         "one space is special in memory, the other is not");
  DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));
  DEBUG_ONLY(PSVirtualSpaceVerifier other_verifier(other_space));

  size_t bytes_needed = bytes;

  // First use the uncommitted region in this space.
  size_t tmp_bytes = MIN2(uncommitted_size(), bytes_needed);
  if (tmp_bytes > 0) {
    if (expand_by(tmp_bytes)) {
      bytes_needed -= tmp_bytes;
    } else {
      return 0;
    }
  }

  // Next take from the uncommitted region in the other space, and commit it.
  tmp_bytes = MIN2(other_space->uncommitted_size(), bytes_needed);
  if (tmp_bytes > 0) {
    // The new memory sits immediately below our current committed region.
    char* const commit_base = committed_low_addr() - tmp_bytes;
    // "Special" (pre-committed, e.g. large-page) memory needs no explicit commit.
    if (other_space->special() ||
        os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
      // Reduce the reserved region in the other space.
      other_space->set_reserved(other_space->reserved_low_addr(),
                                other_space->reserved_high_addr() - tmp_bytes,
                                other_space->special());

      // Grow both reserved and committed in this space.
      _reserved_low_addr -= tmp_bytes;
      _committed_low_addr -= tmp_bytes;
      bytes_needed -= tmp_bytes;
    } else {
      // Commit failed: report only what steps 1-2 actually delivered so far.
      return bytes - bytes_needed;
    }
  }

  // Finally take from the already committed region in the other space.
  tmp_bytes = bytes_needed;
  if (tmp_bytes > 0) {
    // Reduce both committed and reserved in the other space.
    other_space->set_committed(other_space->committed_low_addr(),
                               other_space->committed_high_addr() - tmp_bytes);
    other_space->set_reserved(other_space->reserved_low_addr(),
                              other_space->reserved_high_addr() - tmp_bytes,
                              other_space->special());

    // Grow both reserved and committed in this space.
    _reserved_low_addr -= tmp_bytes;
    _committed_low_addr -= tmp_bytes;
  }

  return bytes;
}
void PSOldGen::expand_to_reserved() { assert_lock_strong(ExpandHeap_lock); assert_locked_or_safepoint(Heap_lock); size_t remaining_bytes = _virtual_space.uncommitted_size(); if (remaining_bytes > 0) { bool success = expand_by(remaining_bytes); assert(success, "grow to reserved failed"); } }
void Line::detach() { if(!buf_len) //str points to a file buffer { char* save = str; int sz = size(); expand_by(sz); memcpy(str, save, sz+1); } }
// Deprecated. bool PSVirtualSpace::initialize(ReservedSpace rs, size_t commit_size) { set_reserved(rs); set_committed(reserved_low_addr(), reserved_low_addr()); // Commit to initial size. assert(commit_size <= rs.size(), "commit_size too big"); bool result = commit_size > 0 ? expand_by(commit_size) : true; DEBUG_ONLY(verify()); return result; }
// Grow the old generation by at least `bytes` (page-aligned up), preferring
// the configured minimum expansion step when it is larger.  Falls back to
// committing whatever reserved space remains if both attempts fail.
void PSOldGen::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);

  const size_t aligned_request = ReservedSpace::page_align_size_up(bytes);
  const size_t min_step        = ReservedSpace::page_align_size_up(MinHeapDeltaBytes);

  // Try the minimum delta first when it exceeds the request, then the
  // request itself, and finally everything still reserved.
  bool expanded = false;
  if (min_step > aligned_request) {
    expanded = expand_by(min_step);
  }
  if (!expanded) {
    expanded = expand_by(aligned_request);
  }
  if (!expanded) {
    expand_to_reserved();
  }

  if (GC_locker::is_active()) {
    // Let the GC locker know the heap grew while it was active.
    GC_locker::heap_expanded();
  }
}
// Adopt the reserved space `rs`, partition it into three commit regions
// (lower / middle / upper), and commit the first `committed_size` bytes.
// The middle region is aligned to `max_commit_granularity` (typically the
// large-page size); the lower and upper fringes use the normal page size.
// Returns false if `rs` was never reserved or the initial commit fails.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  // Overall reserved range: [base, base + size).
  _low_boundary = rs.base();
  _high_boundary = low_boundary() + rs.size();

  // Committed range starts empty at the low end.
  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
// Resize the perm gen after a collection: target size is the current live
// bytes plus the padded average of per-GC allocation, aligned and clamped to
// [_min_gen_size, _max_gen_size].  Expands or shrinks under ExpandHeap_lock
// and optionally logs the decision under -XX:+PrintAdaptiveSizePolicy.
void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
         "negative allocation amount since last GC?");

  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  //  The free space is the newly computed padded average,
  //  so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();
  if (desired_size == size_before) {
    // no change, we're done
    return;
  }

  {
    // We'll be growing or shrinking the heap:  in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking
      const size_t change_bytes = size_before - desired_size;
      const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}
// Construct a line holding `width` display columns of `master`, starting at
// display column `start`.  When `force` is non-zero or `start` is not on a
// tab-stop boundary, the text is produced by master->get_print; otherwise the
// master's raw buffer is walked directly, copying tabs through when they fit
// and expanding them to spaces when they would overshoot the window, then
// padding the remainder with tabs/spaces out to `width` columns.
Line::Line(PLine master, int start, int width, int force)
{
	init();
	if(width <= 0)
		return;
	expand_by(width + 1);  // room for `width` chars plus the NUL
	if(force || (start % iTabWidth)) //Not on the tab boundary
		master->get_print(start, str, width);
	else
	{
		int pos = 0;        // current display column in master
		char* out = str;
		char* src = master->str;
		//Locate starting point in the buffer;
		while(pos < start)
		{
			if(!*src)
				break;
			if(*src == '\t')
				pos = ((pos / iTabWidth + 1) * iTabWidth);  // advance to next tab stop
			else
				pos++;
			src++;
		}
		if(*src)
		{
			// Copy characters until `width` columns are filled.
			while(pos < (start + width))
			{
				if(!*src)
					break;
				if(*src == '\t')
				{
					int fill = pos;
					pos = ((pos / iTabWidth + 1) * iTabWidth);
					if(pos <= (start + width))
					{
						// Whole tab fits inside the window: keep it.
						*out++ = *src++;
						continue;
					}
					// Tab would overshoot the right edge: pad with spaces.
					while(fill++ < (start + width))
						*out++ = ' ';
				}
				else
				{
					pos++;
					*out++ = *src++;
				}
			}
		}
		// Source exhausted: pad the rest of the window, preferring tabs
		// where a full tab stop still fits.
		while(pos < (start + width))
		{
			if((pos + iTabWidth) < (start + width))
			{
				pos += iTabWidth;
				*out++ = '\t';
			}
			else
			{
				pos++;
				*out++ = ' ';
			}
		}
		*out = 0;
	}
	recalc_sz();
}
// Delete `num` display columns starting at display column `del_pos`,
// rewriting the line with tabs expanded to spaces where a deletion lands
// inside one.  If `pBuf` is given, an undo diff against the pre-edit copy is
// recorded via pBuf->track_line.  Returns the last character removed
// (a real ' ' when the deletion consumed part of an expanded tab), or 0 if
// nothing was deleted.
int Line::del_char(int del_pos, int num, Buffer* pBuf)
{
	if(del_pos < 0 || num <= 0)
		return 0;
	int slen = len();
	if(del_pos >= slen)
		return 0;
	// Keep a copy of the pre-edit line for diff generation below.
	Line ln = this;
	if(!ln.buf_len) //Line points to original buffer
		expand_by(slen);
	int tabw = iTabWidth;
	char *str2 = ln.str;  // read cursor over the old text
	char *out = str;      // write cursor over this line's buffer
	int pos = 0;          // display column of *str2
	int chr = 0;          // last character removed
	for(pos = 0; pos < slen;)
	{
		if(*str2 == '\t')
		{
			int fill = pos;
			pos = ((pos / tabw + 1) * tabw);
			if(pos <= del_pos || fill >= del_pos)
			{
				// Tab lies wholly before or wholly after the deletion point.
				if(fill == del_pos)
				{
					// Deletion starts exactly on the tab: drop it, and count
					// its remaining columns against `num`.
					chr = *str2++;
					while(fill++ < pos && --num)
						del_pos++;
				}
				else
					*out++ = *str2++;
				continue;
			}
			// Deletion lands inside the tab: expand it to spaces, skipping
			// the deleted column(s).
			while(fill < pos)
			{
				if(fill == del_pos)
				{
					chr = ' ';
					if(--num)
						del_pos++;
				}
				else
					*out++ = ' ';
				fill++;
			}
			str2++;
			continue;
		}
		if(*str2)
		{
			if(pos == del_pos)
			{
				// Drop this character; move the target on while `num` lasts.
				chr = *str2++;
				if(--num)
					del_pos++;
			}
			else
				*out++ = *str2++;
			pos++;
		}
		else
			pos++;
	}
	*out = 0;
	recalc_sz();
	if(pBuf)
	{
		// Record an undo record for this edit.
		PLine diff = ln.gen_diff(this);
		pBuf->track_line(diff, this);
	}
	return chr;
}
// Insert `num` characters from `chr` at display column `ins_pos`, expanding
// any tab the insertion lands inside into spaces and space-padding when the
// insertion point lies past the end of the line.  If `pBuf` is given, an undo
// diff against the pre-edit copy is recorded via pBuf->track_line.  Returns
// the column advance for the first inserted character: the distance to the
// next tab stop when chr[0] is a tab, otherwise 1 — presumably the caret
// advance; confirm against callers.
int Line::ins_char(int ins_pos, char* chr, int num, Buffer* pBuf)
{
	if(ins_pos < 0 || !chr || num <= 0)
		return 1;
	int slen = len();
	//Make a copy of the existing string
	Line ln = this;
	if(!ln.buf_len) //Line points to original buffer
		expand_by(slen);
	int new_len = max(slen, ins_pos);  // line may need to grow out to ins_pos
	int tabw = iTabWidth;
	// Worst-case growth: every inserted tab expands to a full tab width.
	int ins_len = 0;
	for(int i = 0; i < num; i++)
		ins_len += (chr[i] == '\t') ? tabw:1;
	check_size(new_len + ins_len + tabw);
	char *str2 = ln.str;  // read cursor over the old text
	char *out = str;      // write cursor over this line's buffer
	int pos = 0;          // display column of *str2
	// Column advance for the first inserted char (tab jumps to next stop).
	int rc = (chr[0] == '\t') ? ((ins_pos / iTabWidth + 1) * iTabWidth - ins_pos) : 1;
	for(pos = 0; pos <= new_len;)
	{
		if(pos >= slen) //Insertion is past the end of the line
		{
			if(pos == ins_pos)
				while(num--)
					*out++ = *chr++;
			else if(pos < new_len)
				*out++ = ' ';  // pad the gap up to the insertion point
			pos++;
			continue;
		}
		if(*str2 == '\t') //Process TAB
		{
			int fill = pos;
			pos = ((pos / tabw + 1) * tabw);
			if(pos <= ins_pos || fill >= ins_pos)
			{
				// Tab lies wholly before or after the insertion point.
				if(fill == ins_pos)
				{
					while(num--)
						*out++ = *chr++;
				}
				*out++ = *str2++;
				continue;
			}
			// Insertion lands inside the tab: expand it to spaces and drop
			// the inserted text at the right column.
			while(fill < pos)
			{
				if(fill == ins_pos)
					while(num--)
						*out++ = *chr++;
				*out++ = ' ';
				fill++;
			}
			str2++;
			continue;
		}
		if(*str2) //Usual character
		{
			if(pos == ins_pos)
				while(num--)
					*out++ = *chr++;
			*out++ = *str2++;
			pos++;
		}
		else
			pos++;
	}
	*out = 0;
	recalc_sz();
	if(pBuf)
	{
		// Record an undo record for this edit.
		PLine diff = ln.gen_diff(this);
		pBuf->track_line(diff, this);
	}
	return rc;
}