/*
   First we need to determine if a particular virtual space is using large
   pages. This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages. Once we have
   determined this, all expand_by and shrink_by calls must grow and shrink
   by large page size chunks. If a particular request is within the current
   large page, the call to commit and uncommit memory can be ignored. In
   the case that the low and high boundaries of this space are not large
   page aligned, the pages leading up to the first large page address and
   the pages after the last large page address must be allocated with
   default pages.

     low_boundary()                                        high_boundary()
     |  lower region  |        middle region        |  upper region  |
     |  default pages |         large pages         |  default pages |
                      ^                             ^
           lower_high_boundary()           middle_high_boundary()
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be. If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment. Lower and upper
  // alignment will always be the default page size. Middle alignment will
  // be LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high(). For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high(). By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page). There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
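// A minimal usage sketch (illustrative only, not part of this file): how a
// caller pairs a ReservedSpace reservation with incremental commits through
// expand_by. The 2M/512K sizes are hypothetical; the middle region is only
// backed by large pages when the space exceeds LargePageSizeInBytes.
//
//   ReservedSpace rs(2 * M);             // reserve 2M of address space
//   VirtualSpace vs;
//   if (vs.initialize(rs, 0)) {          // nothing committed up front
//     if (vs.expand_by(512 * K, false)) {
//       // [low(), high()) now covers 512K of committed memory:
//       // committed_size() == 512K, uncommitted_size() == 1536K
//     }
//   }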
// A page is uncommitted if the contents of the entire page are deemed
// unusable. Continue to decrement the high() pointer until it reaches a
// page boundary, at which point that page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size) {
    fatal("Cannot shrink virtual space to negative size");
  }

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(),
         "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
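// Worked example of the round-up arithmetic above (hypothetical addresses;
// round_to rounds its argument up to the given power-of-two alignment).
// With a 4K default page, suppose high() == 0x12340 and the containing
// region's committed high is 0x13000 (rounded up when it was committed):
//
//   shrink_by(0x1100):  unaligned_new_high == 0x12340 - 0x1100 == 0x11240
//                       round_to(0x11240, 0x1000)              == 0x12000
//                       needs == 0x13000 - 0x12000             == 0x1000
//
// so exactly one whole page, [0x12000, 0x13000), is handed to
// os::uncommit_memory, while the partially used page [0x11000, 0x12000)
// stays committed. Rounding up is what keeps live data below high() safe.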