void TrainGeneration::weak_ref_barrier_check(oop* p) {
  if (is_in_reserved(p) && is_in_reserved(*p)) {
    // Update train remembered set if reference is in this generation.
    // This check is required whether the object moved or not; for
    // example, we might just be scanning followers.
    oop_update_remembered_set(p);
  } else {
    // During weak reference discovery, sometimes p is not in the
    // heap: it might be one of the lists.
    if (Universe::heap()->is_in_reserved(p)) {
      // A later generation might want to examine this reference.
      _ct->ct_bs()->inline_write_ref_field(p, *p);
    }
  }
}
oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
  // enabled)
  if (old->age() < tenuring_threshold() &&
      !Universe::jvmpi_slow_allocation()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s, from);
    if (obj == NULL) {
      // A failed promotion likely means the MaxLiveObjectEvacuationRatio
      // flag is incorrectly set. In any case, it's seriously wrong to be
      // here!
      vm_exit_out_of_memory(s*wordSize, "promotion");
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    atomic::prefetch_write(obj, interval);

    // Copy obj
    Memory::copy_words_aligned((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  if (Universe::jvmpi_move_event_enabled()) {
    Universe::jvmpi_object_move(old, obj);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
        // flag is incorrectly set. In any case, it's seriously wrong to be
        // here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
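All three revisions of copy_to_survivor_space above share one decision chain: an object whose age is below the tenuring threshold is first tried in to-space, anything else (or any to-space allocation failure) is promoted to the next generation, and a failed promotion is either fatal or handled by leaving the object in place. Below is a minimal standalone sketch of that chain; the bump-pointer spaces, the allocate_in_to_space/promote_to_old helpers, and the threshold value are all invented for illustration and are not HotSpot APIs.

#include <cstddef>
#include <cstdio>

// Toy bump-pointer spaces standing in for to-space and the old
// generation; illustrations only, not HotSpot code.
static unsigned char to_space[256];
static size_t        to_top = 0;
static unsigned char old_space[1024];
static size_t        old_top = 0;

static unsigned char* allocate_in_to_space(size_t size) {
  if (to_top + size > sizeof(to_space)) return NULL;   // to-space full
  unsigned char* p = &to_space[to_top];
  to_top += size;
  return p;
}

static unsigned char* promote_to_old(size_t size) {
  if (old_top + size > sizeof(old_space)) return NULL; // old gen full
  unsigned char* p = &old_space[old_top];
  old_top += size;
  return p;
}

static const int kTenuringThreshold = 15;  // assumed value

// Mirrors the decision chain above: young objects are tried in
// to-space first; older objects, and any to-space failure, fall back
// to promotion; if both fail, the object stays where it is, as in
// the handle_promotion_failure() path.
static unsigned char* copy_or_promote(unsigned char* obj, int age, size_t size) {
  unsigned char* dest = NULL;
  if (age < kTenuringThreshold) {
    dest = allocate_in_to_space(size);
  }
  if (dest == NULL) {
    dest = promote_to_old(size);
    if (dest == NULL) {
      return obj;  // promotion failure: leave the object in place
    }
  }
  // A real collector would copy the object's words here and then
  // store a forwarding pointer in the old copy's header.
  return dest;
}

int main() {
  unsigned char original[64];
  printf("age 3  copied? %s\n", copy_or_promote(original, 3, 64)  != original ? "yes" : "no");
  printf("age 15 copied? %s\n", copy_or_promote(original, 15, 64) != original ? "yes" : "no");
  return 0;
}

The real versions additionally prefetch ahead of the destination, bump the surviving object's age, and (in the oldest revision) emit JVMPI move events; the sketch keeps only the placement decision.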
void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}
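The loop at the heart of object_iterate_mem is the classic contiguous-space walk: treat the current address as an object, ask it for its size, visit it, and advance by that size. Below is a standalone sketch of the same walk over a toy heap whose objects are length-prefixed records; the layout is invented for illustration, whereas HotSpot reads the size from the real object header.

#include <cstddef>
#include <cstdio>

// Toy heap: each object is one size_t header holding its total size
// in words, followed by payload words.
int main() {
  size_t heap[16] = { 0 };
  size_t* top = heap;

  // Lay down three objects of 3, 5 and 4 words.
  const size_t sizes[] = { 3, 5, 4 };
  for (size_t i = 0; i < 3; i++) {
    *top = sizes[i];
    top += sizes[i];
  }

  // Same shape as the while-loop above: read the size at the current
  // address, visit the object, then advance by that many words.
  for (size_t* obj = heap; obj < top; obj += *obj) {
    printf("object at word offset %td, %zu words\n", obj - heap, *obj);
  }
  return 0;
}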
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT
                 " that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr),
                 p2i(reserved_region().start()),
                 p2i(reserved_region().end())));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t))
                >> HeapRegion::LogOfHRGrainBytes);
}
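The return expression is plain arithmetic: the byte offset of addr from the heap base, shifted right by the log of the region ("grain") size. Below is a worked standalone version; the heap base is invented and the 1 MiB grain (log 20) is assumed for illustration, since G1 actually chooses the region size from the heap size at startup.

#include <cstdint>
#include <cstdio>

// Assumed values for illustration only.
static const uintptr_t kHeapBase          = 0x10000000u; // invented base
static const unsigned  kLogOfHRGrainBytes = 20;          // 1 MiB regions

// Same arithmetic as addr_to_region() above: byte offset from the
// heap base, shifted down by the log of the region size.
static unsigned addr_to_region(uintptr_t addr) {
  return (unsigned)((addr - kHeapBase) >> kLogOfHRGrainBytes);
}

int main() {
  // 0x250000 bytes past the base is 2 MiB + 320 KiB, so region 2.
  printf("region %u\n", addr_to_region(kHeapBase + 0x250000));
  return 0;
}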