inline void oopDesc::incr_age() { assert(!is_forwarded(), "Attempt to increment age of forwarded mark"); if (has_displaced_mark()) { set_displaced_mark(displaced_mark()->incr_age()); } else { set_mark(mark()->incr_age()); } }
// The following method needs to be MT safe. inline int oopDesc::age() const { assert(!is_forwarded(), "Attempt to read age from forwarded mark"); if (has_displaced_mark()) { return displaced_mark()->age(); } else { return mark()->age(); } }
// Atomically install a forwarding pointer to 'p' in this object's mark
// word, racing against other GC worker threads.
//
// Returns NULL if this thread's CAS succeeded (the caller's copy 'p' is
// now the official forwardee), or the forwardee installed by a competing
// thread if that thread won the race.  The mark word is reused to encode
// the forwarding pointer, so a "marked" mark means already-forwarded.
inline oop oopDesc::forward_to_atomic(oop p) {
  assert(ParNewGeneration::is_legal_forward_ptr(p),
         "illegal forwarding pointer value.");
  markOop oldMark = mark();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  // Sanity: the encoding must round-trip, and the CAS below swaps the
  // mark word as one pointer-sized unit.
  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  // Loop until either our CAS succeeds or we observe a mark that is
  // already a forwarding pointer installed by another thread.
  while (!oldMark->is_marked()) {
    curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
    // Whichever thread won, some forwarding pointer is installed by now.
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      // Our CAS won: signal the caller that 'p' is the forwardee.
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  // Another thread forwarded the object first; return its copy.
  return forwardee();
}
// Compute this object's size in HeapWords from the klass' layout helper,
// avoiding a virtual call for the two common cases (instances and arrays).
// Falls back to the virtual Klass::oop_size() only when the layout helper
// is neutral (zero).
inline int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      // BUGFIX: fetch the length through the volatile pointer.  The
      // previous code cast the pointer itself to size_t (and compared the
      // pointer, not the length, in the assert), yielding an address
      // rather than the array length.
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
                HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array.  This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table.  Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray() && is_forwarded() &&
                (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}