Example #1
File: dump.cpp Project: ericbbcc/hotspot
 void do_object(oop obj) {
   if (!obj->is_shared() &&
       !obj->is_forwarded()) {
     ++count;
     if (Verbose) {
       tty->print("Unreferenced object: ");
       obj->print_on(tty);
     }
   }
 }
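
Example #1 comes from the CDS dump code: the closure is applied to every heap object and counts those that are neither shared nor already forwarded, optionally printing them. As a rough, self-contained illustration of the visitor pattern involved (the Obj and closure types below are simplified stand-ins invented for this sketch, not HotSpot's real oop/ObjectClosure classes):

#include <cstdio>
#include <vector>

// Simplified stand-ins for HotSpot's oop / ObjectClosure machinery.
struct Obj {
  bool is_shared;
  bool is_forwarded;
};

struct ObjectClosure {
  virtual ~ObjectClosure() = default;
  virtual void do_object(Obj* obj) = 0;
};

// Mirrors Example #1: count objects that are neither shared nor forwarded.
struct CountUnreferenced : ObjectClosure {
  int count = 0;
  void do_object(Obj* obj) override {
    if (!obj->is_shared && !obj->is_forwarded) {
      ++count;
    }
  }
};

int main() {
  std::vector<Obj> heap = {{false, false}, {true, false}, {false, true}};
  CountUnreferenced cl;
  for (Obj& o : heap) cl.do_object(&o);                 // heap iteration drives the closure
  std::printf("unreferenced objects: %d\n", cl.count);  // prints 1
}
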
Example #2
File: dump.cpp Project: ericbbcc/hotspot
static bool mark_object(oop obj) {
  if (obj != NULL &&
      !obj->is_shared() &&
      !obj->is_forwarded() &&
      !obj->is_gc_marked()) {
    obj->set_mark(markOopDesc::prototype()->set_marked());
    return true;
  }

  return false;
}
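
Example #2 marks an object by overwriting its header: markOopDesc::prototype() yields the default mark word and set_marked() returns a copy with the GC-mark tag set, which is what is_gc_marked() later tests. A minimal toy model of that encoding (the bit layout here is illustrative only and is not guaranteed to match any particular HotSpot version):

#include <cassert>
#include <cstdint>

// Toy mark word: the low two bits carry a GC state tag
// (values chosen for illustration only).
struct MarkWord {
  std::uintptr_t bits;

  static constexpr std::uintptr_t kTagMask = 0x3;
  static constexpr std::uintptr_t kMarked  = 0x3;

  static MarkWord prototype() { return MarkWord{0x1}; }  // "unlocked" default
  MarkWord set_marked() const { return MarkWord{(bits & ~kTagMask) | kMarked}; }
  bool is_marked() const { return (bits & kTagMask) == kMarked; }
};

int main() {
  MarkWord m = MarkWord::prototype().set_marked();
  assert(m.is_marked());
  assert(!MarkWord::prototype().is_marked());
}
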
Example #3
void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  // The object might be in the process of being copied by another
  // worker so we cannot trust that its to-space image is
  // well-formed. So we have to read its size from its from-space
  // image which we know should not be changing.
  _cm->grayRoot(to_obj);
}
Example #4
oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
  // enabled)
  if (old->age() < tenuring_threshold() &&
      !Universe::jvmpi_slow_allocation()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s, from);
    if (obj == NULL) {
      // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
      // is incorrectly set. In any case, it's seriously wrong to be here!
      vm_exit_out_of_memory(s*wordSize, "promotion");
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    atomic::prefetch_write(obj, interval);

    // Copy obj
    Memory::copy_words_aligned((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  if (Universe::jvmpi_move_event_enabled()) {
    Universe::jvmpi_object_move(old, obj);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
Example #5
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
Example #6
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
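
All three copy_to_survivor_space variants above share one recipe: allocate in to-space while the object's age is under the tenuring threshold, otherwise promote to the next generation; copy the body; bump the age if the object stayed young; then install a forwarding pointer in the old copy's header so later references find the new location. A condensed, single-threaded sketch of that flow, using toy bump allocators and a plain pointer as the forwarding slot (none of these types are HotSpot's):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Toy object header: the forwarding slot is null until the object is copied.
struct Obj {
  Obj*   forwardee;
  int    age;
  size_t payload_bytes;
  bool   is_forwarded() const { return forwardee != nullptr; }
};

const int kTenuringThreshold = 15;

// Bump-pointer allocation inside a fixed buffer (stand-in for a space).
static Obj* bump_alloc(char*& top, char* end, size_t bytes) {
  if (top + bytes > end) return nullptr;
  Obj* p = reinterpret_cast<Obj*>(top);
  top += bytes;
  return p;
}

Obj* copy_to_survivor_space(Obj* old_obj,
                            char*& to_top,  char* to_end,
                            char*& old_top, char* old_end) {
  size_t bytes = sizeof(Obj) + old_obj->payload_bytes;
  Obj* obj = nullptr;

  // Try to-space first, unless the object is too old.
  if (old_obj->age < kTenuringThreshold)
    obj = bump_alloc(to_top, to_end, bytes);

  if (obj == nullptr) {
    // Promotion: in HotSpot the next generation copies the object itself.
    obj = bump_alloc(old_top, old_end, bytes);
    if (obj == nullptr) { std::fputs("promotion failed\n", stderr); std::abort(); }
    std::memcpy(obj, old_obj, bytes);
  } else {
    std::memcpy(obj, old_obj, bytes);
    obj->age++;                    // still in the young generation: age it
  }

  old_obj->forwardee = obj;        // install the forwarding pointer
  return obj;
}

int main() {
  alignas(Obj) static char to_space[1024], old_space[1024], eden[64];
  char* to_top  = to_space;
  char* old_top = old_space;

  Obj* old_obj = reinterpret_cast<Obj*>(eden);
  *old_obj = Obj{nullptr, 3, 16};

  Obj* copy = copy_to_survivor_space(old_obj,
                                     to_top,  to_space  + sizeof(to_space),
                                     old_top, old_space + sizeof(old_space));
  std::printf("forwarded=%d new_age=%d\n", old_obj->is_forwarded(), copy->age);
}
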
Example #7
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}
Example #8
 bool do_object_b(oop p) {
   return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
 }
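
Examples #7 and #8 (and #12 below) are scavenge-time liveness tests: anything outside the young generation is assumed live, and a young object is live exactly when it has already been forwarded to a new location. A toy version of that predicate, with invented YoungGen/Obj types standing in for is_in_reserved()/is_obj_in_young():

#include <cassert>
#include <new>

// Toy address-range test standing in for the young-generation check.
struct YoungGen {
  const char* begin;
  const char* end;
  bool contains(const void* p) const {
    const char* c = static_cast<const char*>(p);
    return c >= begin && c < end;
  }
};

struct Obj { Obj* forwardee = nullptr; };

// Live if outside the young generation, or already copied (forwarded).
bool is_alive(const YoungGen& young, Obj* p) {
  return !young.contains(p) || p->forwardee != nullptr;
}

int main() {
  alignas(Obj) static char space[64];
  YoungGen young{space, space + sizeof(space)};

  Obj* in_young = new (space) Obj{};
  Obj outside;

  assert(is_alive(young, &outside));    // old object: trivially live
  assert(!is_alive(young, in_young));   // young and not yet copied: dead so far
  in_young->forwardee = &outside;
  assert(is_alive(young, in_young));    // forwarded during scavenge: live
}
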
Example #9
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->clear_fwd_ptr_mark();
    }
  }
Example #10
  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    HeapWord* obj_end = obj_addr + obj_size;

    if (_end_of_last_gap != obj_addr) {
      // there was a gap before obj_addr
      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
    }

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      if (!_cm->isPrevMarked(obj)) {
        _cm->markPrev(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during
        // initial-mark (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to by roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
    } else {

      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr(obj_addr, obj_size);
      CollectedHeap::fill_with_object(mr);

      // must nuke all dead objects which we skipped when iterating over the region
      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
    }
    _end_of_last_gap = obj_end;
    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
  }
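
The obj->is_forwarded() && obj->forwardee() == obj test above detects evacuation failure: when G1 cannot copy an object, the worker forwards the object to itself, which is exactly what the self_forwarded helper in Example #13 below checks. A hedged sketch of installing such a self-forward with a compare-and-swap so that only one racing worker wins (toy types again, not G1's actual header code):

#include <atomic>
#include <cassert>

// Toy header: the forwarding slot is atomic because several GC workers
// may race to claim the same object.
struct Obj {
  std::atomic<Obj*> forwardee{nullptr};

  bool is_forwarded() const { return forwardee.load() != nullptr; }
  bool self_forwarded() const { return forwardee.load() == this; }
};

// On evacuation failure, try to point the object at itself; return the
// winning forwardee (this object, or the copy another worker installed).
Obj* handle_evacuation_failure(Obj* obj) {
  Obj* expected = nullptr;
  if (obj->forwardee.compare_exchange_strong(expected, obj)) {
    return obj;      // we won: the object is now self-forwarded
  }
  return expected;   // lost the race: someone else forwarded it first
}

int main() {
  Obj o;
  Obj* f = handle_evacuation_failure(&o);
  assert(f == &o && o.is_forwarded() && o.self_forwarded());
}
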
Example #11
 virtual void do_object(oop obj) {
   if (obj->is_forwarded()) {
     obj->init_mark();
   }
 }
Example #12
 bool do_object_b(oop p) {
   return (HeapWord*)p >= PSScavenge::_eden_boundary || p->is_forwarded();
 }
Example #13
inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
  bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
  return result;
}