// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in(obj)) return false;
  // obj is aligned and accessible in heap
  // try to find metaclass cycle safely without seg faulting on bad input
  // we should reach klassKlassObj by following klass link at most 3 times
  for (int i = 0; i < 3; i++) {
    obj = obj->klass();
    // klass should be aligned and in permspace
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_permanent(obj)) return false;
  }
  if (obj != Universe::klassKlassObj()) {
    // During a dump, the _klassKlassObj moved to a shared space.
    if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
      return true;
    }
    return false;
  }

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}
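// A minimal, self-contained sketch (not HotSpot code) of the bounded
// metaclass walk above. The Obj type and helpers are hypothetical
// stand-ins: in the old permgen VM, klassKlassObj is its own klass, so
// following the klass link from any well-formed oop reaches that fixed
// point within three hops, and the loop bound keeps a corrupt pointer
// from being chased forever.

#include <cstdint>
#include <cstdio>

struct Obj {
  Obj* klass;  // analogous to the oop's klass link
};

static Obj g_klass_klass = { &g_klass_klass };  // fixed point: its own klass

// Hypothetical stand-ins for check_obj_alignment / is_in_permanent.
static bool aligned(const Obj* o) { return (reinterpret_cast<uintptr_t>(o) & 0x7) == 0; }
static bool in_perm(const Obj* o) { return o != nullptr; }  // placeholder bounds check

static bool looks_like_oop(Obj* obj) {
  if (!aligned(obj)) return false;
  // Follow the klass link at most 3 times; a well-formed chain must
  // land on the self-referential klass-klass by then.
  for (int i = 0; i < 3; i++) {
    obj = obj->klass;
    if (!aligned(obj) || !in_perm(obj)) return false;
  }
  return obj == &g_klass_klass;
}

int main() {
  Obj klass    = { &g_klass_klass };
  Obj instance = { &klass };
  std::printf("instance looks like an oop: %d\n", looks_like_oop(&instance));  // 1
  return 0;
}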
inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base  = Universe::narrow_oop_base();
  int     shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " INTPTR_FORMAT, p2i((void*) result)));
  return result;
}
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base  = Universe::narrow_oop_base();
  int     shift = Universe::narrow_oop_shift();
  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}
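// A standalone sketch of the compressed-oop arithmetic used by the
// encode/decode pair above, with the heap base and shift passed as
// plain parameters instead of being read from Universe. A 3-bit shift
// (8-byte object alignment) is assumed here for illustration; the
// addresses and names are hypothetical, not the VM's API.

#include <cassert>
#include <cstdint>

using narrow_t = uint32_t;

narrow_t encode(uintptr_t addr, uintptr_t base, int shift) {
  uint64_t pd     = addr - base;   // pointer_delta: offset from heap base in bytes
  uint64_t result = pd >> shift;   // drop the alignment bits
  assert((result & UINT64_C(0xffffffff00000000)) == 0 && "narrow oop overflow");
  return static_cast<narrow_t>(result);
}

uintptr_t decode(narrow_t v, uintptr_t base, int shift) {
  return base + (static_cast<uintptr_t>(v) << shift);  // base + (narrow << shift)
}

int main() {
  const uintptr_t base  = 0x100000000;  // hypothetical heap base
  const int       shift = 3;            // log2 of 8-byte object alignment
  uintptr_t addr = base + 0x7fff8;      // some 8-byte-aligned heap address
  narrow_t n = encode(addr, base, shift);
  assert(decode(n, base, shift) == addr && "reversibility");
  return 0;
}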
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  if (obj == NULL) {
    return;
  }

#ifdef ASSERT
  // can't do this check because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");
  assert(check_obj_alignment(obj), "not oop aligned");
  assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

  assert(_from != NULL, "from region must be non-NULL");
  assert(_from->is_in_reserved(p) ||
         (_from->is_humongous() &&
          _g1->heap_region_containing(p)->is_humongous() &&
          _from->humongous_start_region() == _g1->heap_region_containing(p)->humongous_start_region()),
         "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
         p2i(p), _from->hrm_index(), _from->humongous_start_region()->hrm_index());

  HeapRegion* to = _g1->heap_region_containing(obj);
  if (_from == to) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and we
    // reload the values, things may have changed.
    // This check also lets slip through references from a continues humongous
    // region to its humongous start region: they are in different regions, so a
    // remembered set entry is added. This is benign (apart from memory usage),
    // as we never try to either evacuate or eagerly reclaim such regions.
    return;
  }

  // The _record_refs_into_cset flag is true during the RSet
  // updating part of an evacuation pause. It is false at all
  // other times:
  //  * rebuilding the remembered sets after a full GC
  //  * during concurrent refinement
  //  * updating the remembered sets of regions in the collection
  //    set in the event of an evacuation failure (when deferred
  //    updates are enabled)
  if (_record_refs_into_cset && to->in_collection_set()) {
    // We are recording references that point into the collection
    // set and this particular reference does exactly that...
    // If the referenced object has already been forwarded
    // to itself, we are handling an evacuation failure and
    // have already visited/tried to copy this object, so
    // there is no need to retry.
    if (!self_forwarded(obj)) {
      assert(_push_ref_cl != NULL, "should not be null");
      // Push the reference onto the refs queue of the G1ParScanThreadState
      // instance for this worker thread.
      _push_ref_cl->do_oop(p);
    }
    // Deferred updates to the CSet are either discarded (in the normal case),
    // or processed (if an evacuation failure occurs) at the end
    // of the collection.
    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
  } else {
    // We either don't care about pushing references that point into the
    // collection set (i.e. we're not during an evacuation pause) _or_
    // the reference doesn't point into the collection set. Either way
    // we add the reference directly to the RSet of the region containing
    // the referenced object.
    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    to->rem_set()->add_reference(p, _worker_i);
  }
}
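// A condensed sketch of the three-way routing decision in do_oop_work
// above, with hypothetical booleans standing in for the G1 types. It
// shows only the control flow: same-region references are dropped,
// references into the collection set are queued for the evacuation
// scanner (unless the object is self-forwarded after an evacuation
// failure), and all other cross-region references go straight into the
// target region's remembered set.

#include <cstdio>

enum class Route { Ignore, PushToScanQueue, AddToRemSet };

Route route_reference(bool same_region,
                      bool recording_refs_into_cset,
                      bool target_in_cset,
                      bool self_forwarded) {
  if (same_region) {
    return Route::Ignore;            // intra-region; no remset entry needed
  }
  if (recording_refs_into_cset && target_in_cset) {
    // Evacuation-pause RSet update: hand the reference to the scanner
    // unless evacuation of this object already failed.
    return self_forwarded ? Route::Ignore : Route::PushToScanQueue;
  }
  return Route::AddToRemSet;         // ordinary cross-region reference
}

int main() {
  // Cross-region reference into the cset during an evacuation pause.
  std::printf("%d\n", static_cast<int>(route_reference(false, true, true, false)));
  return 0;
}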
inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  oop result = unsafe_decode_heap_oop_not_null(v);
  assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  return result;
}