Example no. 1
oop objVectorMap::clone(oop obj, bool mustAllocate, oop genObj) {
  assert_objVector(obj, "not an obj vector");
  objVectorOop v= objVectorOop(obj)->copy(mustAllocate, genObj);
  if (oop(v) != failedAllocationOop) v->init_mark();
  return v;
}
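
The clone above signals failure by returning the failedAllocationOop sentinel rather than NULL, so callers can test one distinguished value after a chain of allocations. A minimal self-contained C++ analogy of that sentinel-on-failure pattern (names here are illustrative, not Self VM API):

#include <cstdio>
#include <cstdlib>

struct Obj { int length; };

// Illustrative sentinel playing the role of failedAllocationOop: allocation
// failure returns a distinguished object rather than NULL or an exception.
static Obj failed_allocation_sentinel;
static Obj* const kFailedAllocation = &failed_allocation_sentinel;

Obj* allocate_obj(int length) {
  Obj* o = (Obj*)malloc(sizeof(Obj));
  if (o == NULL) return kFailedAllocation;   // mirrors a CANFAIL allocation
  o->length = length;
  return o;
}

int main() {
  Obj* v = allocate_obj(10);
  if (v != kFailedAllocation) printf("allocated, length=%d\n", v->length);
  return 0;
}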
Example no. 2
oop processOopClass::ActivationStack_prim(void *FH) {
  ResourceMark rm;
  Process* p = checkProcess(this);
  if (!p) {
    prim_failure(FH, NOPROCESSERROR);
    return NULL;
  }
  if (p == currentProcess)
    p->killVFrameOopsAndSetWatermark( p->last_self_frame(false));
  FlushRegisterWindows();

  vframeBList = new VFrameBList(1000);

  p->stack()->vframes_do(addVFrameToList);

  bool hideFirst = p != vmProcess;
  if (vframeBList->nonEmpty() && hideFirst) (void) vframeBList->pop();

  smi len= vframeBList->length();
  objVectorOop resultVec= Memory->objVectorObj->cloneSize(len, CANFAIL);
  if (oop(resultVec) == failedAllocationOop) {
    out_of_memory_failure(FH, Memory->objVectorObj->size() + len);
    return NULL;
  }

  oop*         resultp   = resultVec->objs();

  // Build the result as a merge of vframeBList and existing vframeOops
  // and save mirrors of all the resulting vframeOops.
  vframeOop prev  = vframeList();
  vframeOop merge = prev->next();
  for (int i = 0; i < vframeBList->length(); i++) {
    abstract_vframe* vf = vframeBList->nth(i);
    mirrorOop mirror;
    if (merge && merge->is_equal(vf)) {
      mirror= merge->as_mirror_or_fail();
      if (oop(mirror) == failedAllocationOop) {
        out_of_memory_failure(FH);
        return NULL;
      }
      prev  = merge;
      merge = merge->next();
    } else {
      vframeOop vfo= clone_vframeOop(vf, p, CANFAIL);
      if (oop(vfo) == failedAllocationOop) {
        out_of_memory_failure(FH);
        return NULL;
      }
      mirror= vfo->as_mirror_or_fail();
      if (oop(mirror) == failedAllocationOop) {
        out_of_memory_failure(FH);
        return NULL;
      }
      vfo->insertAfter(prev);
      prev = vfo;
    }
    Memory->store(resultp++, mirror);
  }
  if (p == currentProcess)
    p->killVFrameOopsAndSetWatermark( p->last_self_frame(false));
  return resultVec;
}
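
The main loop above walks the freshly built vframeBList against the process's existing vframeOop chain, reusing a chain element when is_equal() matches and cloning plus insertAfter() otherwise. A self-contained sketch of that merge shape (hypothetical Node type, not the VM's):

#include <vector>

struct Node { int key; Node* next; };

// Merge fresh keys into the chain after `prev`, reusing an existing node
// when it matches and splicing in a new one otherwise -- the same shape
// as the vframeOop merge loop above.
void merge_after(Node* prev, const std::vector<int>& fresh) {
  Node* merge = prev->next;
  for (int k : fresh) {
    if (merge != NULL && merge->key == k) {
      prev  = merge;                 // reuse the existing node
      merge = merge->next;
    } else {
      Node* n = new Node;            // "clone" a new node
      n->key  = k;
      n->next = prev->next;          // insertAfter(prev)
      prev->next = n;
      prev = n;
    }
  }
}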
Example no. 3
void InterpretedIC::clear_without_deallocation_pic() {
  if (is_empty()) return;
  set(Bytecodes::original_send_code_for(send_code()), oop(selector()), smiOop_zero);
}
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj,
                                                 size_t size) {
  post_allocation_setup_no_klass_install(klass, obj, size);
  post_allocation_install_obj_klass(klass, oop(obj), (int) size);
}
Example no. 5
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}
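
The card-cleaning step above pairs the clean-card store with OrderAccess::storeload() so the write is globally visible before the klass_or_null() reads that follow. A minimal C++11 analogy of that StoreLoad pattern (not HotSpot code; the seq_cst fence is the portable way to get StoreLoad ordering):

#include <atomic>

std::atomic<char>  card(1);
std::atomic<void*> klass_word(nullptr);

void* clean_then_reread() {
  card.store(0, std::memory_order_relaxed);             // the card write
  std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad barrier
  return klass_word.load(std::memory_order_relaxed);    // the reads below it
}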
Example no. 6
nmethod::nmethod(AbstractCompiler* c, bool generateDebugCode) {
  CHECK_VTBL_VALUE;
  _instsLen  = roundTo(iLen, oopSize);
  _locsLen   = ilLen;
  depsLen    = dLen;
  // backpointer is just before deps
  depsAddr   = (nmln*)     ((char*)dAddr + sizeof(nmethod*));

  *dBackLinkAddr() = this;
  
  // Copy the nmethodScopes scopeDescs generated by the ScopeDescRecorder
  // to the allocation area.
  c->scopeDescRecorder()->copyTo((VtblPtr_t*)sAddr, (int32)this);
  
  this->scopes = (nmethodScopes*) sAddr;

  oldCount = 0;
  flags.clear();
  flags.isDebug = generateDebugCode;
  setCompiler(c->name());
  flags.isUncommonRecompiled = currentProcess->isUncommon();
    
  verifiedOffset      = c->verifiedOffset();
  diCheckOffset       = c->diCheckOffset();

  frameCreationOffset = c->frameCreationOffset();
  
  rememberLink.init();
  codeTableLink= NULL;
  diLink.init(c->diLink);
  if (diLink.notEmpty()) flags.isDI = true;
  flags.level = c->level();
  if (flags.level >= MaxRecompilationLevels) { // added = zzzz
    warning1("setting invalid nmethod level %ld", flags.level);  // fix this
    flags.level = 0;
  }
  flags.version = c->version();
  if (c->nmName() == nm_nic && ((FCompiler*)c)->isImpure)
    makeImpureNIC();
  key.set_from(c->L->key);
  check_store();
  
  clear_frame_chain();
  assert(c->frameSize() >= 0, "frame size cannot be negative");
  frame_size = c->frameSize();
  _incoming_arg_count = c->incoming_arg_count();
  get_platform_specific_data(c);

  Assembler* instsA = c->instructions();
  copy_bytes(        instsA->instsStart,        insts(), instsLen());
  copy_words((int32*)instsA->locsStart,  (int32*)locs(),  ilLen/4);
  copy_words((int32*)depsStart,          (int32*)deps(),  depsLen/4);
  
  addrDesc *l, *lend;
  for (l = locs(), lend = locsEnd(); l < lend; l++) {
    l->initialShift(this, (char*)insts() - (char*)instsA->instsStart, 0);
  }

  char* bound = Memory->new_gen->boundary();
  for (l = locs(), lend = locsEnd(); l < lend; l++) {
    if (l->isOop())
      OopNCode::check_store(oop(l->referent(this)), bound); // cfront garbage
    else if (l->isSendDesc()) {
      l->asSendDesc(this)->dependency()->init();
    } else if (l->isDIDesc()) {
      l->asDIDesc(this)->dependency()->init();
      flags.isDI = true; 
    }
  }
  
  for (nmln* d = deps(), *dend = depsEnd(); d < dend; d++) {
    d->relocate();
  }
  
  MachineCache::flush_instruction_cache_range(insts(), instsEnd());
  MachineCache::flush_instruction_cache_for_debugging();
  
  if (this == (nmethod*)catchThisOne) warning("caught nmethod");
}
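
After copy_bytes() moves the instructions, the loop over locs() shifts each addrDesc by the distance between the compiler's buffer and the nmethod's. A self-contained sketch of that relocation fix-up (assumed layout: each offset names a word holding an absolute pointer into the old buffer; not the real addrDesc encoding):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

void relocate(char* new_code, const char* old_code,
              const std::vector<size_t>& pointer_offsets) {
  ptrdiff_t delta = new_code - old_code;
  for (size_t off : pointer_offsets) {
    uintptr_t v;
    std::memcpy(&v, new_code + off, sizeof v);  // read the recorded address
    v += (uintptr_t)delta;                      // shift into the new buffer
    std::memcpy(new_code + off, &v, sizeof v);  // write it back
  }
}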
Example no. 7
int main(int argc, char **argv)
{
    if (argc != 2)
    {
        std::cout << "Usage: " << argv[0] << " dimensionOfTheProblem" << std::endl;
        exit(0);
    }
    int dim = atoi(argv[1]);

    auto space(std::make_shared<ompl::base::RealVectorStateSpace>(dim));
    ompl::geometric::SimpleSetup ss(space);
    const ompl::base::SpaceInformationPtr &si = ss.getSpaceInformation();
    space->setBounds(-1, 1);

    ss.setStateValidityChecker(std::make_shared<ValidityChecker>(si));

    ompl::base::ScopedState<> start(space), goal(space);
    for (int i = 0; i < dim; ++i)
    {
        start[i] = -1;
        goal[i] = 1;
    }

    ss.setStartAndGoalStates(start, goal);

    // by default, use the Benchmark class
    double runtime_limit = 5, memory_limit = 1024;
    int run_count = 100;
    ompl::tools::Benchmark::Request request(runtime_limit, memory_limit, run_count, 0.05, true, true, false, false);
    ompl::tools::Benchmark b(ss, "Diagonal");

    double range = 0.1 * sqrt(dim);

    auto lengthObj(std::make_shared<ompl::base::PathLengthOptimizationObjective>(si));
    ompl::base::OptimizationObjectivePtr oop((0.5 / sqrt(dim)) * lengthObj);

    ss.setOptimizationObjective(oop);

    bool knn = true;

    auto rrtstar(std::make_shared<ompl::geometric::RRTstar>(si));
    rrtstar->setName("RRT*");
    rrtstar->setDelayCC(true);
    // rrtstar->setFocusSearch(true);
    rrtstar->setRange(range);
    rrtstar->setKNearest(knn);
    b.addPlanner(rrtstar);
    auto rrtsh(std::make_shared<ompl::geometric::RRTsharp>(si));
    rrtsh->setRange(range);
    rrtsh->setKNearest(knn);
    b.addPlanner(rrtsh);
    /*auto rrtsh3(std::make_shared<ompl::geometric::RRTsharp>(si));
    rrtsh3->setName("RRT#v3");
    rrtsh3->setRange(range);
    rrtsh3->setKNearest(knn);
    rrtsh3->setVariant(3);
    b.addPlanner(rrtsh3);
    auto rrtsh2(std::make_shared<ompl::geometric::RRTsharp>(si));
    rrtsh2->setName("RRT#v2");
    rrtsh2->setRange(range);
    rrtsh2->setKNearest(knn);
    rrtsh2->setVariant(2);
    b.addPlanner(rrtsh2);*/
    auto rrtX1(std::make_shared<ompl::geometric::RRTXstatic>(si));
    rrtX1->setName("RRTX0.1");
    rrtX1->setEpsilon(0.1);
    rrtX1->setRange(range);
    // rrtX1->setVariant(3);
    rrtX1->setKNearest(knn);
    b.addPlanner(rrtX1);
    auto rrtX2(std::make_shared<ompl::geometric::RRTXstatic>(si));
    rrtX2->setName("RRTX0.01");
    rrtX2->setEpsilon(0.01);
    rrtX2->setRange(range);
    // rrtX2->setVariant(3);
    rrtX2->setKNearest(knn);
    b.addPlanner(rrtX2);
    auto rrtX3(std::make_shared<ompl::geometric::RRTXstatic>(si));
    rrtX3->setName("RRTX0.001");
    rrtX3->setEpsilon(0.001);
    rrtX3->setRange(range);
    // rrtX3->setVariant(3);
    rrtX3->setKNearest(knn);
    b.addPlanner(rrtX3);
    b.benchmark(request);
    b.saveResultsToFile(boost::str(boost::format("Diagonal.log")).c_str());

    exit(0);
}
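
main() above relies on a ValidityChecker class the excerpt does not show; in OMPL it would subclass ompl::base::StateValidityChecker. A plausible minimal version for this all-free-space benchmark (a sketch, assuming the trivial accept-everything checker):

#include <ompl/base/StateValidityChecker.h>

class ValidityChecker : public ompl::base::StateValidityChecker
{
public:
    ValidityChecker(const ompl::base::SpaceInformationPtr &si)
      : ompl::base::StateValidityChecker(si)
    {
    }

    // The Diagonal benchmark plans in free space, so every state is valid.
    bool isValid(const ompl::base::State * /*state*/) const override
    {
        return true;
    }
};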
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}
Example no. 9
 // Set operations
 void set(methodOop method) {
   assert(method->is_method(), "must be method");
   _result = oop(method);
 }
Example no. 10
 void set(nmethod* nm) {
   assert(oop(nm)->is_smi(), "nmethod must be aligned");
   _result = oop(nm->jump_table_entry()->entry_point());
 }
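
The assert above leans on Self-style pointer tagging: smis carry a zero low tag, so a sufficiently aligned code address passes is_smi(). A tiny illustration of that check (the 2-bit tag layout is an assumption here):

#include <cstdint>

// Assumed tagging: oops reserve the low two bits, and 00 marks a smi, so
// any 4-byte-aligned address "is" a smi under the tag test.
inline bool looks_like_smi(const void* p) {
  return (reinterpret_cast<uintptr_t>(p) & 3u) == 0;
}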
Example no. 11
oop G1CMObjArrayProcessor::encode_array_slice(HeapWord* addr) {
  return oop((void*)((uintptr_t)addr | ArraySliceBit));
}
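
encode_array_slice() hides a flag in a low bit of an aligned address; the scheme only works together with matching is-slice and decode operations. A self-contained sketch of the full trio (the ArraySliceBit value is an assumption):

#include <cstdint>

// Assumed: HeapWord* is at least 2-byte aligned, so bit 0 is free for a tag.
static const uintptr_t kArraySliceBit = 1;

void* encode_slice(void* addr)  { return (void*)((uintptr_t)addr |  kArraySliceBit); }
bool  is_slice(void* value)     { return ((uintptr_t)value & kArraySliceBit) != 0; }
void* decode_slice(void* value) { return (void*)((uintptr_t)value & ~kArraySliceBit); }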
Example no. 12
void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(oop(mark));
    mark += oop(mark)->size();
  }
}
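
object_iterate_from() advances through a contiguous space by adding each object's own size; the same walk applies to any packed arena whose records carry their length in a header. A self-contained sketch (hypothetical record layout):

#include <cstddef>

// Hypothetical packed record: a size-in-bytes header followed by payload.
struct Record { size_t size_in_bytes; };

// Step from `mark` to `top` record by record, exactly as
// object_iterate_from() steps oop by oop above.
void iterate_from(char* mark, char* top, void (*blk)(Record*)) {
  while (mark < top) {
    Record* r = (Record*)mark;
    blk(r);
    mark += r->size_in_bytes;
  }
}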
Example no. 13
objVectorOop objVectorOopClass::shrink(fint delta, bool mustAllocate) {
  objVectorOop v= (objVectorOop) slotsOopClass::shrink(size(), delta,
                                                       mustAllocate);
  if (oop(v) != failedAllocationOop) v->set_length(length() - delta);
  return v;
}
Example no. 14
bool frame::has_interpreted_float_marker() const {
  return oop(at(interpreted_frame_float_magic_offset)) == Floats::magic_value();
}
void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord*       q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      debug_only(prev_q = q);
      q += size;
    }
#endif

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) {
    space()->mangle_unused_area();
  }
}
Example no. 16
bool frame::has_compiled_float_marker() const {
  return oop(at(compiled_frame_magic_oop_offset)) == Floats::magic_value();
}
void PSMarkSweepDecorator::precompact() {
  // Reset our own compact top.
  set_compaction_top(space()->bottom());

  /* We allow some amount of garbage towards the bottom of the space, so
   * we don't start compacting before there is a significant gain to be made.
   * Occasionally, we want to ensure a full compaction, which is determined
   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
   * performance improvement!
   */
  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    const size_t ratio = allowed_dead_ratio();
    allowed_deadspace = space()->capacity_in_words() * ratio / 100;
  }

  // Fetch the current destination decorator
  PSMarkSweepDecorator* dest = destination_decorator();
  ObjectStartArray* start_array = dest->start_array();

  HeapWord* compact_top = dest->compaction_top();
  HeapWord* compact_end = dest->space()->end();

  HeapWord* q = space()->bottom();
  HeapWord* t = space()->top();

  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last
                                   live object. */
  HeapWord*  first_dead = space()->end(); /* The first dead object. */
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the
                                   first header of preceding free area. */
  _first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  while (q < t) {
    assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (oop(q)->is_gc_marked()) {
      /* prefetch beyond q */
      Prefetch::write(q, interval);
      size_t size = oop(q)->size();

      size_t compaction_max_size = pointer_delta(compact_end, compact_top);

      // This should only happen if a space in the young gen overflows the
      // old gen. If that should happen, we null out the start_array, because
      // the young spaces are not covered by one.
      while(size > compaction_max_size) {
        // First record the last compact_top
        dest->set_compaction_top(compact_top);

        // Advance to the next compaction decorator
        advance_destination_decorator();
        dest = destination_decorator();

        // Update compaction info
        start_array = dest->start_array();
        compact_top = dest->compaction_top();
        compact_end = dest->space()->end();
        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
        assert(compact_end > compact_top, "Must always be space remaining");
        compaction_max_size =
          pointer_delta(compact_end, compact_top);
      }

      // store the forwarding pointer into the mark word
      if (q != compact_top) {
        oop(q)->forward_to(oop(compact_top));
        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
      } else {
        // if the object isn't moving we can just set the mark to the default
        // mark and handle it specially later on.
        oop(q)->init_mark();
        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
      }

      // Update object start array
      if (start_array) {
        start_array->allocate_block(compact_top);
      }

      compact_top += size;
      assert(compact_top <= dest->space()->end(),
        "Exceeding space in destination");

      q += size;
      end_of_live = q;
    } else {
      /* run over all the contiguous dead objects */
      HeapWord* end = q;
      do {
        /* prefetch beyond end */
        Prefetch::write(end, interval);
        end += oop(end)->size();
      } while (end < t && (!oop(end)->is_gc_marked()));

      /* see if we might want to pretend this object is alive so that
       * we don't have to compact quite as often.
       */
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (insert_deadspace(allowed_deadspace, q, sz)) {
          size_t compaction_max_size = pointer_delta(compact_end, compact_top);

          // This should only happen if a space in the young gen overflows the
          // old gen. If that should happen, we null out the start_array, because
          // the young spaces are not covered by one.
          while (sz > compaction_max_size) {
            // First record the last compact_top
            dest->set_compaction_top(compact_top);

            // Advance to the next compaction decorator
            advance_destination_decorator();
            dest = destination_decorator();

            // Update compaction info
            start_array = dest->start_array();
            compact_top = dest->compaction_top();
            compact_end = dest->space()->end();
            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
            assert(compact_end > compact_top, "Must always be space remaining");
            compaction_max_size =
              pointer_delta(compact_end, compact_top);
          }

          // store the forwarding pointer into the mark word
          if (q != compact_top) {
            oop(q)->forward_to(oop(compact_top));
            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
          } else {
            // if the object isn't moving we can just set the mark to the default
            // mark and handle it specially later on.
            oop(q)->init_mark();
            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
          }

          // Update object start array
          if (start_array) {
            start_array->allocate_block(compact_top);
          }

          compact_top += sz;
          assert(compact_top <= dest->space()->end(),
            "Exceeding space in destination");

          q = end;
          end_of_live = end;
          continue;
        }
      }

      /* for the previous LiveRange, record the end of the live objects. */
      if (liveRange) {
        liveRange->set_end(q);
      }

      /* record the current LiveRange object.
       * liveRange->start() is overlaid on the mark word.
       */
      liveRange = (LiveRange*)q;
      liveRange->set_start(end);
      liveRange->set_end(end);

      /* see if this is the first dead region. */
      if (q < first_dead) {
        first_dead = q;
      }

      /* move on to the next object */
      q = end;
    }
  }

  assert(q == t, "just checking");
  if (liveRange != NULL) {
    liveRange->set_end(q);
  }
  _end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  _first_dead = first_dead;

  // Update compaction top
  dest->set_compaction_top(compact_top);
}
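
precompact() writes a forwarding pointer into each live object's mark word, and compact() later slides every live object down to that forwardee: the classic two-pass sliding compactor. A compact self-contained sketch of the same scheme over a toy heap (all layout assumed; deadspace and mark-word encoding omitted):

#include <cstring>

// Toy object header: liveness, size in bytes, and a forwarding pointer
// (which the real collector smuggles into the mark word).
struct Hdr { bool live; size_t size; char* forwardee; };

// Pass 1 ("precompact"): assign each live object its destination.
void toy_precompact(char* bottom, char* top) {
  char* compact_top = bottom;
  for (char* q = bottom; q < top; q += ((Hdr*)q)->size) {
    Hdr* h = (Hdr*)q;
    if (h->live) { h->forwardee = compact_top; compact_top += h->size; }
  }
}

// Pass 2 ("compact"): slide live objects down. Destinations never pass the
// scan pointer, so source and destination may overlap: memmove is required.
void toy_compact(char* bottom, char* top) {
  for (char* q = bottom; q < top; ) {
    Hdr* h = (Hdr*)q;
    size_t sz = h->size;
    if (h->live && q != h->forwardee)
      std::memmove(h->forwardee, q, sz);
    q += sz;
  }
}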
Example no. 18
bool sendDesc::verify() {
  if (isPrimCall()) return true;
  LookupType l= lookupType();
  bool flag= checkLookupTypeAndEntryPoint();
  
  if (isPerformLookupType(l)) {
    if (arg_count() < 0 || arg_count() > 100) {
      error2("sendDesc %#lx arg count %ld is invalid", this, arg_count());
      flag = false;
    }
  } else {
    if (! oop(selector())->verify_oop()) {
      flag = false;
    } else if (! selector()->is_string()) {
      error1("sendDesc %#lx selector isn't a string", this);
      flag = false;
    }
    nmethod* nm = target();
    if (nm == NULL) {
      CountStub* cs = countStub();
      if (cs) nm = cs->target();
    }
    if (nm == NULL) {
      CacheStub* cs = pic();
      if (cs) {
        nm= cs->get_method(0);
        oop sel= nm->key.selector;
        for (fint i= 1; i < cs->arity(); ++i)
          if (cs->get_method(i)->key.selector != sel)
            error2("sendDesc %#lx: selector != PIC case %d selector",
                   this, i);
      }
    }
    if (nm && nm->key.selector != selector())
      error1("sendDesc %#lx: selector != target nmethod's selector", this);
  }
  if (l & DelegateeStaticBit) {
    if (! delegatee()->verify_oop()) {
      flag = false;
    } else if (baseLookupType(l) == DirectedResendLookupType &&
               ! delegatee()->is_string()) {
      error1("sendDesc %#lx delegatee isn't a string", this);
      flag = false;
    }
  }
  if (!dependency()->verify_list_integrity()) {
    lprintf("\tof sendDesc %#lx\n", this);
    flag = false;
  }
  if (pic()) {
    if (dependency()->next != dependency()->prev)
      error1("sendDesc %#lx: more than one elem in dependency chain", this);
    pic()->verify();
  } else {
    CountStub *cs= countStub();
    if (cs == NULL) {
      if (isCounting() && jump_addr() != lookupRoutine())
        error1("sendDesc %#lx: doesn't have countStub but is counting", this);
    } else {
      if (!isCounting() && !cs->isAgingStub())
        error1("sendDesc %#lx: has countStub but is not counting", this);
      if (dependency()->next != dependency()->prev)
        error1("sendDesc %#lx: more than one elem in dependency chain", this);
      countStub()->verify2(NULL);
    }
  }
  return flag;
}
Example no. 19
/*
 * Main loop for command mode command decoding.
 * A few commands are executed here, but main function
 * is to strip command addresses, do a little address oriented
 * processing and call command routines to do the real work.
 */
void
commands(bool noprompt, bool exitoneof)
{
	register line *addr;
	register int c;
	register int lchng;
	int given;
	int seensemi;
	int cnt;
	bool hadpr;

	resetflav();
	nochng();
	for (;;) {
		/*
		 * If dot at last command
		 * ended up at zero, advance to one if there is a such.
		 */
		if (dot <= zero) {
			dot = zero;
			if (dol > zero)
				dot = one;
		}
		shudclob = 0;

		/*
		 * If autoprint or trailing print flags,
		 * print the line at the specified offset
		 * before the next command.
		 */
		if (pflag ||
		    (lchng != chng && value(AUTOPRINT) && !inglobal && !inopen && endline)) {
			pflag = 0;
			nochng();
			if (dol != zero) {
				addr1 = addr2 = dot + poffset;
				if (addr1 < one || addr1 > dol)
error("Offset out-of-bounds|Offset after command too large");
				setdot1();
				goto print;
			}
		}
		nochng();

		/*
		 * Print prompt if appropriate.
		 * If not in global flush output first to prevent
		 * going into pfast mode unreasonably.
		 */
		if (inglobal == 0) {
			flush();
			if (!hush && value(PROMPT) && !globp && !noprompt && endline) {
				ex_putchar(':');
				hadpr = 1;
			}
			TSYNC();
		}

		/*
		 * Gobble up the address.
		 * Degenerate addresses yield ".".
		 */
		addr2 = 0;
		given = seensemi = 0;
		do {
			addr1 = addr2;
			addr = address(0);
			c = getcd();
			if (addr == 0) {
				if (c == ',')
					addr = dot;
				else if (addr1 != 0) {
					addr2 = dot;
					break;
				} else
					break;
			}
			addr2 = addr;
			given++;
			if (c == ';') {
				c = ',';
				dot = addr;
				seensemi = 1;
			}
		} while (c == ',');
		if (c == '%') {
			/* %: same as 1,$ */
			addr1 = one;
			addr2 = dol;
			given = 2;
			c = ex_getchar();
		}
		if (addr1 == 0)
			addr1 = addr2;
		if (c == ':')
			c = ex_getchar();

		/*
		 * Set command name for special character commands.
		 */
		tailspec(c);

		/*
		 * If called via : escape from open or visual, limit
		 * the set of available commands here to save work below.
		 */
		if (inopen) {
			if (c=='\n' || c=='\r' || c==CTRL('d') || c==EOF) {
				if (addr2)
					dot = addr2;
				if (c == EOF)
					return;
				continue;
			}
			if (any(c, "o"))
notinvis:
				tailprim(Command, 1, 1);
		}
		switch (c) {

		case 'a':

			switch(peekchar()) {
			case 'b':
/* abbreviate */
				tail("abbreviate");
				setnoaddr();
				mapcmd(0, 1);
				anyabbrs = 1;
				continue;
			case 'r':
/* args */
				tail("args");
				setnoaddr();
				eol();
				pargs();
				continue;
			}

/* append */
			if (inopen)
				goto notinvis;
			tail("append");
			setdot();
			aiflag = exclam();
			ex_newline();
			vmacchng(0);
			deletenone();
			setin(addr2);
			inappend = 1;
			ignore(append(gettty, addr2));
			inappend = 0;
			nochng();
			continue;

		case 'c':
			switch (peekchar()) {

/* copy */
			case 'o':
				tail("copy");
				vmacchng(0);
				move();
				continue;

#ifdef CHDIR
/* cd */
			case 'd':
				tail("cd");
				goto changdir;

/* chdir */
			case 'h':
				ignchar();
				if (peekchar() == 'd') {
					register char *p;
					tail2of("chdir");
changdir:
					if (savedfile[0] == '/' || !value(WARN))
						ignore(exclam());
					else
						ignore(quickly());
					if (skipend()) {
						p = getenv("HOME");
						if (p == NULL)
							error("Home directory unknown");
					} else
						getone(), p = file;
					eol();
					if (chdir(p) < 0)
						filioerr(p);
					if (savedfile[0] != '/')
						edited = 0;
					continue;
				}
				if (inopen)
					tailprim("change", 2, 1);
				tail2of("change");
				break;

#endif
			default:
				if (inopen)
					goto notinvis;
				tail("change");
				break;
			}
/* change */
			aiflag = exclam();
			setCNL();
			vmacchng(0);
			setin(addr1);
			delete(0);
			inappend = 1;
			ignore(append(gettty, addr1 - 1));
			inappend = 0;
			nochng();
			continue;

/* delete */
		case 'd':
			/*
			 * Caution: dp and dl have special meaning already.
			 */
			tail("delete");
			c = cmdreg();
			setCNL();
			vmacchng(0);
			if (c)
				YANKreg(c);
			delete(0);
			appendnone();
			continue;

/* edit */
/* ex */
		case 'e':
			tail(peekchar() == 'x' ? "ex" : "edit");
editcmd:
			if (!exclam() && chng)
				c = 'E';
			filename(c);
			if (c == 'E') {
				ungetchar(lastchar());
				ignore(quickly());
			}
			setnoaddr();
doecmd:
			init();
			addr2 = zero;
			laste++;
			ex_sync();
			rop(c);
			nochng();
			continue;

/* file */
		case 'f':
			tail("file");
			setnoaddr();
			filename(c);
			noonl();
/*
			synctmp();
*/
			continue;

/* global */
		case 'g':
			tail("global");
			global(!exclam());
			nochng();
			continue;

/* insert */
		case 'i':
			if (inopen)
				goto notinvis;
			tail("insert");
			setdot();
			nonzero();
			aiflag = exclam();
			ex_newline();
			vmacchng(0);
			deletenone();
			setin(addr2);
			inappend = 1;
			ignore(append(gettty, addr2 - 1));
			inappend = 0;
			if (dot == zero && dol > zero)
				dot = one;
			nochng();
			continue;

/* join */
		case 'j':
			tail("join");
			c = exclam();
			setcount();
			nonzero();
			ex_newline();
			vmacchng(0);
			if (given < 2 && addr2 != dol)
				addr2++;
			join(c);
			continue;

/* k */
		case 'k':
casek:
			pastwh();
			c = ex_getchar();
			if (endcmd(c))
				serror("Mark what?|%s requires following letter", Command);
			ex_newline();
			if (!islower(c))
				error("Bad mark|Mark must specify a letter");
			setdot();
			nonzero();
			names[c - 'a'] = *addr2 &~ 01;
			anymarks = 1;
			continue;

/* list */
		case 'l':
			tail("list");
			setCNL();
			ignorf(setlist(1));
			pflag = 0;
			goto print;

		case 'm':
			if (peekchar() == 'a') {
				ignchar();
				if (peekchar() == 'p') {
/* map */
					tail2of("map");
					setnoaddr();
					mapcmd(0, 0);
					continue;
				}
/* mark */
				tail2of("mark");
				goto casek;
			}
/* move */
			tail("move");
			vmacchng(0);
			move();
			continue;

		case 'n':
			if (peekchar() == 'u') {
				tail("number");
				goto numberit;
			}
/* next */
			tail("next");
			setnoaddr();
			ckaw();
			ignore(quickly());
			if (getargs())
				makargs();
			next();
			c = 'e';
			filename(c);
			goto doecmd;

/* open */
		case 'o':
			tail("open");
			oop();
			pflag = 0;
			nochng();
			continue;

		case 'p':
		case 'P':
			switch (peekchar()) {

/* put */
			case 'u':
				tail("put");
				setdot();
				c = cmdreg();
				eol();
				vmacchng(0);
				if (c)
					putreg(c);
				else
					put();
				continue;

			case 'r':
				ignchar();
				if (peekchar() == 'e') {
/* preserve */
					tail2of("preserve");
					eol();
					if (preserve() == 0)
						error("Preserve failed!");
					else
						error("File preserved.");
				}
				tail2of("print");
				break;

			default:
				tail("print");
				break;
			}
/* print */
			setCNL();
			pflag = 0;
print:
			nonzero();
			if (CL && span() > EX_LINES) {
				flush1();
				vclear();
			}
			plines(addr1, addr2, 1);
			continue;

/* quit */
		case 'q':
			tail("quit");
			setnoaddr();
			c = quickly();
			eol();
			if (!c)
quit:
				nomore();
			if (inopen) {
				vgoto(WECHO, 0);
				if (!ateopr())
					vnfl();
				else {
					tostop();
				}
				flush();
				setty(normf);
			}
			cleanup(1);
			ex_exit(0);

		case 'r':
			if (peekchar() == 'e') {
				ignchar();
				switch (peekchar()) {

/* rewind */
				case 'w':
					tail2of("rewind");
					setnoaddr();
					if (!exclam()) {
						ckaw();
						if (chng && dol > zero)
							error("No write@since last chage (:rewind! overrides)");
					}
					eol();
					erewind();
					next();
					c = 'e';
					ungetchar(lastchar());
					filename(c);
					goto doecmd;

/* recover */
				case 'c':
					tail2of("recover");
					setnoaddr();
					c = 'e';
					if (!exclam() && chng)
						c = 'E';
					filename(c);
					if (c == 'E') {
						ungetchar(lastchar());
						ignore(quickly());
					}
					init();
					addr2 = zero;
					laste++;
					ex_sync();
					recover();
					rop2();
					revocer();
					if (status == 0)
						rop3(c);
					if (dol != zero)
						change();
					nochng();
					continue;
				}
				tail2of("read");
			} else
				tail("read");
/* read */
			if (savedfile[0] == 0 && dol == zero)
				c = 'e';
			pastwh();
			vmacchng(0);
			if (peekchar() == '!') {
				setdot();
				ignchar();
				unix0(0);
				filter(0);
				continue;
			}
			filename(c);
			rop(c);
			nochng();
			if (inopen && endline && addr1 > zero && addr1 < dol)
				dot = addr1 + 1;
			continue;

		case 's':
			switch (peekchar()) {
			/*
			 * Caution: 2nd char cannot be c, g, or r
			 * because these have meaning to substitute.
			 */

/* set */
			case 'e':
				tail("set");
				setnoaddr();
				set();
				continue;

/* shell */
			case 'h':
				tail("shell");
				setNAEOL();
				vnfl();
				putpad(TE);
				flush();
				unixwt(1, unixex("-i", (char *) 0, 0, 0));
				vcontin(0);
				continue;

/* source */
			case 'o':
#ifdef notdef
				if (inopen)
					goto notinvis;
#endif
				tail("source");
				setnoaddr();
				getone();
				eol();
				source(file, 0);
				continue;
#ifdef SIGTSTP
/* stop, suspend */
			case 't':
				tail("stop");
				goto suspend;
			case 'u':
				tail("suspend");
suspend:
				if (!dosusp)
					error("Old tty driver|Not using new tty driver/shell");
				c = exclam();
				eol();
				if (!c)
					ckaw();
				onsusp(0);
				continue;
#endif

			}
			/* fall into ... */

/* & */
/* ~ */
/* substitute */
		case '&':
		case '~':
			Command = "substitute";
			if (c == 's')
				tail(Command);
			vmacchng(0);
			if (!substitute(c))
				pflag = 0;
			continue;

/* t */
		case 't':
			if (peekchar() == 'a') {
				tail("tag");
				tagfind(exclam());
				if (!inopen)
					lchng = chng - 1;
				else
					nochng();
				continue;
			}
			tail("t");
			vmacchng(0);
			move();
			continue;

		case 'u':
			if (peekchar() == 'n') {
				ignchar();
				switch(peekchar()) {
/* unmap */
				case 'm':
					tail2of("unmap");
					setnoaddr();
					mapcmd(1, 0);
					continue;
/* unabbreviate */
				case 'a':
					tail2of("unabbreviate");
					setnoaddr();
					mapcmd(1, 1);
					anyabbrs = 1;
					continue;
				}
/* undo */
				tail2of("undo");
			} else
				tail("undo");
			setnoaddr();
			markDOT();
			c = exclam();
			ex_newline();
			undo(c);
			continue;

		case 'v':
			switch (peekchar()) {

			case 'e':
/* version */
				tail("version");
				setNAEOL();
				ex_printf("@(#) Version 3.6, 11/3/80"
				    " (4.0BSD).  git "
				    "160803 14:24"
				    +5);
				noonl();
				continue;

/* visual */
			case 'i':
				tail("visual");
				if (inopen) {
					c = 'e';
					goto editcmd;
				}
				vop();
				pflag = 0;
				nochng();
				continue;
			}
/* v */
			tail("v");
			global(0);
			nochng();
			continue;

/* write */
		case 'w':
			c = peekchar();
			tail(c == 'q' ? "wq" : "write");
wq:
			if (skipwh() && peekchar() == '!') {
				pofix();
				ignchar();
				setall();
				unix0(0);
				filter(1);
			} else {
				setall();
				wop(1);
				nochng();
			}
			if (c == 'q')
				goto quit;
			continue;

/* xit */
		case 'x':
			tail("xit");
			if (!chng)
				goto quit;
			c = 'q';
			goto wq;

/* yank */
		case 'y':
			tail("yank");
			c = cmdreg();
			setcount();
			eol();
			vmacchng(0);
			if (c)
				YANKreg(c);
			else
				yank();
			continue;

/* z */
		case 'z':
			zop(0);
			pflag = 0;
			continue;

/* * */
/* @ */
		case '*':
		case '@':
			c = ex_getchar();
			if (c=='\n' || c=='\r')
				ungetchar(c);
			if (any(c, "@*\n\r"))
				c = lastmac;
			if (isupper(c))
				c = tolower(c);
			if (!islower(c))
				error("Bad register");
			ex_newline();
			setdot();
			cmdmac(c);
			continue;

/* | */
		case '|':
			endline = 0;
			goto caseline;

/* \n */
		case '\n':
			endline = 1;
caseline:
			notempty();
			if (addr2 == 0) {
				if (UP != NOSTR && c == '\n' && !inglobal)
					c = CTRL('k');
				if (inglobal)
					addr1 = addr2 = dot;
				else {
					if (dot == dol)
						error("At EOF|At end-of-file");
					addr1 = addr2 = dot + 1;
				}
			}
			setdot();
			nonzero();
			if (seensemi)
				addr1 = addr2;
			ex_getline(*addr1);
			if (c == CTRL('k')) {
				flush1();
				destline--;
				if (hadpr)
					shudclob = 1;
			}
			plines(addr1, addr2, 1);
			continue;

/* " */
		case '"':
			comment();
			continue;

/* # */
		case '#':
numberit:
			setCNL();
			ignorf(setnumb(1));
			pflag = 0;
			goto print;

/* = */
		case '=':
			ex_newline();
			setall();
			if (inglobal == 2)
				pofix();
			ex_printf("%d", lineno(addr2));
			noonl();
			continue;

/* ! */
		case '!':
			if (addr2 != 0) {
				vmacchng(0);
				unix0(0);
				setdot();
				filter(2);
			} else {
				unix0(1);
				pofix();
				putpad(TE);
				flush();
				unixwt(1, unixex("-c", uxb, 0, 0));
				vclrech(1);	/* vcontin(0); */
				nochng();
			}
			continue;

/* < */
/* > */
		case '<':
		case '>':
			for (cnt = 1; peekchar() == c; cnt++)
				ignchar();
			setCNL();
			vmacchng(0);
			shift(c, cnt);
			continue;

/* ^D */
/* EOF */
		case CTRL('d'):
		case EOF:
			if (exitoneof) {
				if (addr2 != 0)
					dot = addr2;
				return;
			}
			if (!isatty(0)) {
				if (intty)
					/*
					 * Chtty sys call at UCB may cause a
					 * input which was a tty to suddenly be
					 * turned into /dev/null.
					 */
					onhup(0);
				return;
			}
			if (addr2 != 0) {
				setlastchar('\n');
				putnl();
			}
			if (dol == zero) {
				if (addr2 == 0)
					putnl();
				notempty();
			}
			ungetchar(EOF);
			zop(hadpr);
			continue;

		default:
			if (!isalpha(c))
				break;
			ungetchar(c);
			tailprim("", 0, 0);
		}
		ierror("What?|Unknown command character '%c'", c);
	}
}
Example no. 20
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
       *failures = true;
       return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        if (!klass->is_metaspace_object()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT", objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}
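
The BOT consistency checks above assert that block_start_const() on any address in [top, end) answers top; a block-offset table's contract is to return a block boundary at or below any queried address, from which callers walk forward object by object. A minimal self-contained sketch of such a table (card geometry assumed; HotSpot's real BOT stores compressed back-offsets instead):

#include <cstddef>
#include <vector>

struct BlockStartTable {
  static const size_t CardShift = 9;     // assumed 512-byte cards
  char* base;                            // region bottom
  std::vector<char*> boundary;           // per card: block start <= card start

  BlockStartTable(char* bottom, size_t ncards)
    : base(bottom), boundary(ncards, bottom) {}

  size_t card_of(const char* p) const { return size_t(p - base) >> CardShift; }

  // Called when a block [blk, blk_end) is allocated, in address order:
  // every card whose first word falls inside the block now answers `blk`.
  void record_block(char* blk, char* blk_end) {
    for (size_t c = card_of(blk) + 1; c < boundary.size(); ++c) {
      char* card_start = base + (c << CardShift);
      if (card_start >= blk_end) break;
      boundary[c] = blk;
    }
    if (blk == base + (card_of(blk) << CardShift))  // block starts a card
      boundary[card_of(blk)] = blk;
  }

  // Analogue of block_start_const(): a block boundary at or below `p`.
  char* block_start(const char* p) const { return boundary[card_of(p)]; }
};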
Example no. 21
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
    // We don't need to do young-gen spaces.
    if (s->end() <= gen_boundary) return;
    MemRegion used = s->used_region();

    jbyte* cur_entry = byte_for(used.start());
    jbyte* limit = byte_after(used.last());
    while (cur_entry < limit) {
        if (*cur_entry == CardTableModRefBS::clean_card) {
            jbyte* first_dirty = cur_entry+1;
            while (first_dirty < limit &&
                    *first_dirty == CardTableModRefBS::clean_card) {
                first_dirty++;
            }
            // If the first object is a regular object, and it has a
            // young-to-old field, that would mark the previous card.
            HeapWord* boundary = addr_for(cur_entry);
            HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
            HeapWord* boundary_block = s->block_start(boundary);
            HeapWord* begin = boundary;             // Until proven otherwise.
            HeapWord* start_block = boundary_block; // Until proven otherwise.
            if (boundary_block < boundary) {
                if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
                    oop boundary_obj = oop(boundary_block);
                    if (!boundary_obj->is_objArray() &&
                            !boundary_obj->is_typeArray()) {
                        guarantee(cur_entry > byte_for(used.start()),
                                  "else boundary would be boundary_block");
                        if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
                            begin = boundary_block + s->block_size(boundary_block);
                            start_block = begin;
                        }
                    }
                }
            }
            // Now traverse objects until end.
            if (begin < end) {
                MemRegion mr(begin, end);
                VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
                for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
                    if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
                        oop(cur)->oop_iterate(&verify_blk, mr);
                    }
                }
            }
            cur_entry = first_dirty;
        } else {
            // We'd normally expect that cur_youngergen_and_prev_nonclean_card
            // is a transient value, that cannot be in the card table
            // except during GC, and thus assert that:
            // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
            //        "Illegal CT value");
            // That however, need not hold, as will become clear in the
            // following...

            // We'd normally expect that if we are in the parallel case,
            // we can't have left a prev value (which would be different
            // from the current value) in the card table, and so we'd like to
            // assert that:
            // guarantee(cur_youngergen_card_val() == youngergen_card
            //           || !is_prev_youngergen_card_val(*cur_entry),
            //           "Illegal CT value");
            // That, however, may not hold occasionally, because of
            // CMS or MSC in the old gen. To wit, consider the
            // following two simple illustrative scenarios:
            // (a) CMS: Consider the case where a large object L
            //     spanning several cards is allocated in the old
            //     gen, and has a young gen reference stored in it, dirtying
            //     some interior cards. A young collection scans the card,
            //     finds a young ref and installs a youngergenP_n value.
            //     L then goes dead. Now a CMS collection starts,
            //     finds L dead and sweeps it up. Assume that L is
            //     abutting _unallocated_blk, so _unallocated_blk is
            //     adjusted down to (below) L. Assume further that
            //     no young collection intervenes during this CMS cycle.
            //     The next young gen cycle will not get to look at this
            //     youngergenP_n card since it lies in the unoccupied
            //     part of the space.
            //     Some young collections later the blocks on this
            //     card can be re-allocated either due to direct allocation
            //     or due to absorbing promotions. At this time, the
            //     before-gc verification will fail the above assert.
            // (b) MSC: In this case, an object L with a young reference
            //     is on a card that (therefore) holds a youngergen_n value.
            //     Suppose also that L lies towards the end of the
            //     used space before GC. An MSC collection
            //     occurs that compacts to such an extent that this
            //     card is no longer in the occupied part of the space.
            //     Since current code in MSC does not always clear cards
            //     in the unused part of old gen, this stale youngergen_n
            //     value is left behind and can later be covered by
            //     an object when promotion or direct allocation
            //     re-allocates that part of the heap.
            //
            // Fortunately, the presence of such stale card values is
            // "only" a minor annoyance in that subsequent young collections
            // might needlessly scan such cards, but would still never corrupt
            // the heap as a result. However, it's likely not to be a significant
            // performance inhibitor in practice. (For instance,
            // some recent measurements with unoccupied cards eagerly cleared
            // out to maintain this invariant showed next to no
            // change in young collection times; of course one can construct
            // degenerate examples where the cost can be significant.)
            // Note, in particular, that if the "stale" card is modified
            // after re-allocation, it would be dirty, not "stale". Thus,
            // we can never have a younger ref in such a card and it is
            // safe not to scan that card in any collection. [As we see
            // below, we do some unnecessary scanning
            // in some cases in the current parallel scanning algorithm.]
            //
            // The main point below is that the parallel card scanning code
            // deals correctly with these stale card values. There are two main
            // cases to consider where we have a stale "younger gen" value and a
            // "derivative" case to consider, where we have a stale
            // "cur_younger_gen_and_prev_non_clean" value, as will become
            // apparent in the case analysis below.
            // o Case 1. If the stale value corresponds to a younger_gen_n
            //   value other than the cur_younger_gen value then the code
            //   treats this as being tantamount to a prev_younger_gen
            //   card. This means that the card may be unnecessarily scanned.
            //   There are two sub-cases to consider:
            //   o Case 1a. Let us say that the card is in the occupied part
            //     of the generation at the time the collection begins. In
            //     that case the card will be either cleared when it is scanned
            //     for young pointers, or will be set to cur_younger_gen as a
            //     result of promotion. (We have elided the normal case where
            //     the scanning thread and the promoting thread interleave
            //     possibly resulting in a transient
            //     cur_younger_gen_and_prev_non_clean value before settling
            //     to cur_younger_gen.) [End Case 1a.]
            //   o Case 1b. Consider now the case when the card is in the unoccupied
            //     part of the space which becomes occupied because of promotions
            //     into it during the current young GC. In this case the card
            //     will never be scanned for young references. The current
            //     code will set the card value to either
            //     cur_younger_gen_and_prev_non_clean or leave
            //     it with its stale value -- because the promotions didn't
            //     result in any younger refs on that card. Of these two
            //     cases, the latter will be covered in Case 1a during
            //     a subsequent scan. To deal with the former case, we need
            //     to further consider how we deal with a stale value of
            //     cur_younger_gen_and_prev_non_clean in our case analysis
            //     below. This we do in Case 3 below. [End Case 1b]
            //   [End Case 1]
            // o Case 2. If the stale value corresponds to cur_younger_gen being
            //   a value not necessarily written by a current promotion, the
            //   card will not be scanned by the younger refs scanning code.
            //   (This is OK since as we argued above such cards cannot contain
            //   any younger refs.) The result is that this value will be
            //   treated as a prev_younger_gen value in a subsequent collection,
            //   which is addressed in Case 1 above. [End Case 2]
            // o Case 3. We here consider the "derivative" case from Case 1b. above
            //   because of which we may find a stale
            //   cur_younger_gen_and_prev_non_clean card value in the table.
            //   Once again, as in Case 1, we consider two subcases, depending
            //   on whether the card lies in the occupied or unoccupied part
            //   of the space at the start of the young collection.
            //   o Case 3a. Let us say the card is in the occupied part of
            //     the old gen at the start of the young collection. In that
            //     case, the card will be scanned by the younger refs scanning
            //     code which will set it to cur_younger_gen. In a subsequent
            //     scan, the card will be considered again and get its final
            //     correct value. [End Case 3a]
            //   o Case 3b. Now consider the case where the card is in the
            //     unoccupied part of the old gen, and is occupied as a result
            //     of promotions during this young gc. In that case,
            //     the card will not be scanned for younger refs. The presence
            //     of newly promoted objects on the card will then result in
            //     its keeping the cur_younger_gen_and_prev_non_clean
            //     value, which we have dealt with in Case 3 here. [End Case 3b]
            //   [End Case 3]
            //
            // (Please refer to the code in the helper class
            // ClearNonCleanCardWrapper and in CardTableModRefBS for details.)
            //
            // The informal arguments above can be tightened into a formal
            // correctness proof and it behooves us to write up such a proof,
            // or to use model checking to prove that there are no lingering
            // concerns.
            //
            // Clearly, because of Case 3b, one cannot bound the time for
            // which a card will retain what we have called a "stale" value.
            // However, one can obtain a loose upper bound on the redundant
            // work as a result of such stale values. Note first that any
            // time a stale card lies in the occupied part of the space at
            // the start of the collection, it is scanned by younger refs
            // code and we can define a rank function on card values that
            // declines when this is so. Note also that when a card does not
            // lie in the occupied part of the space at the beginning of a
            // young collection, its rank can either decline or stay unchanged.
            // In this case, no extra work is done in terms of redundant
            // younger refs scanning of that card.
            // Then, the case analysis above reveals that, in the worst case,
            // any such stale card will be scanned unnecessarily at most twice.
            //
            // It is nonetheless advisable to try to get rid of some of this
            // redundant work in a subsequent (low priority) re-design of
            // the card-scanning code, if only to simplify the underlying
            // state machine analysis/proof. ysr 1/28/2002. XXX
            cur_entry++;
        }
    }
}
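The case analysis in the long comment above is easier to check as an explicit state machine over card values. The following standalone sketch is illustrative only: the enum values and the rank function are stand-ins named after the comment, not the real card-table constants. It shows the rank argument: a younger-refs scan never increases a card's rank, so a stale card costs at most two redundant scans.

#include <cstdio>

enum CardValue {
  clean_card,                            // no interesting pointers
  cur_younger_gen,                       // may hold refs into the young gen
  cur_younger_gen_and_prev_non_clean,    // merged/transient value
  prev_younger_gen                       // stale value from an earlier cycle
};

// Rank function sketched in the comment: a younger-refs scan of a stale card
// strictly lowers its rank, so a stale card costs at most two redundant scans.
static int rank(CardValue v) {
  switch (v) {
    case cur_younger_gen_and_prev_non_clean: return 2;
    case prev_younger_gen:                   return 1;
    case cur_younger_gen:                    return 1;
    case clean_card:                         return 0;
  }
  return 0;
}

// Effect of a younger-refs scan on a card it visits (Cases 1a and 3a): the
// card settles to cur_younger_gen or clean_card; rank never increases.
static CardValue scan_card(bool found_young_refs) {
  return found_young_refs ? cur_younger_gen : clean_card;
}

int main() {
  CardValue stale = cur_younger_gen_and_prev_non_clean;
  std::printf("rank before scan: %d, after: %d\n",
              rank(stale), rank(scan_card(/*found_young_refs=*/false)));
}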
size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
    return oop(addr)->size();
}
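block_size relies on heap parsability: every block start in a parseable region is an oop, so oop(addr)->size() is both the block size and the step to the next object (the same stepping the start-array loop in a later example uses). A minimal sketch of that walk, assuming a hypothetical object layout whose first word is its size in words:

#include <cstddef>
#include <cstdint>

// Hypothetical layout: the first word of every object is its size in words,
// so any address that starts an object tells us where the next one begins.
struct FakeObjHeader { std::size_t size_in_words; };

// Walk a parseable region [bottom, top) and count the objects in it.
static std::size_t count_objects(const std::uintptr_t* bottom,
                                 const std::uintptr_t* top) {
  std::size_t n = 0;
  for (const std::uintptr_t* p = bottom; p < top;
       p += reinterpret_cast<const FakeObjHeader*>(p)->size_in_words) {
    ++n;
  }
  return n;
}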
Esempio n. 23
0
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, trace_block_entry);

#undef FUNCTION_CASE

  return "<unknown function>";
}
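The FUNCTION_CASE lines above are the tail of a lookup that maps a runtime entry address back to a printable name for debugging output. Below is a self-contained sketch of the same idiom; the stub functions and the macro body are assumptions for illustration, not code copied from the HotSpot file.

#include <cstdio>
#include <cstdint>

static long lrem_stub(long a, long b) { return a % b; }
static long lmul_stub(long a, long b) { return a * b; }

// Compare the queried address against a known entry point; on a match,
// return the stringified identifier of that entry point.
#define FUNCTION_CASE(a, f) \
  if (reinterpret_cast<std::intptr_t>(a) == reinterpret_cast<std::intptr_t>(&f)) return #f

static const char* name_for_address(const void* entry) {
  FUNCTION_CASE(entry, lrem_stub);
  FUNCTION_CASE(entry, lmul_stub);
  return "<unknown function>";
}
#undef FUNCTION_CASE

int main() {
  std::printf("%s\n", name_for_address(reinterpret_cast<const void*>(&lrem_stub)));
}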


JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, klassOopDesc* klass))
  NOT_PRODUCT(_new_instance_slowcase_cnt++;)

  assert(oop(klass)->is_klass(), "not a class");
  instanceKlassHandle h(thread, klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, klassOopDesc* klass, jint length))
  NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
Esempio n. 24
0
void InterpretedIC::update_inline_cache(InterpretedIC* ic, frame* f, Bytecodes::Code send_code, klassOop klass, LookupResult result) {
  // update inline cache
  if (ic->is_empty() && ic->send_type() != Bytecodes::megamorphic_send) {
    // fill ic for the first time
    Bytecodes::Code new_send_code = Bytecodes::halt;
    if (result.is_entry()) {

      methodOop method = result.method();
      if (UseAccessMethods && Bytecodes::has_access_send_code(send_code) && method->is_accessMethod()) {
        // access method found ==> switch to access send
        new_send_code = Bytecodes::access_send_code_for(send_code);
        ic->set(new_send_code, method, klass);

      } else if (UsePredictedMethods && Bytecodes::has_predicted_send_code(send_code) && method->is_special_primitiveMethod()) {
        // predictable method found ==> switch to predicted send
        // NB: ic of predicted send should be empty so that the compiler knows whether another type occurred or not
        // i.e., {predicted + empty} --> 1 class, {predicted + nonempty} --> 2 klasses (polymorphic)
        // but: this actually doesn't work (yet?) since the interpreter fills the ic on any failure (e.g. overflow)
        new_send_code = method->special_primitive_code();
        method = methodOop(ic->selector()); // ic must stay empty
        klass  = NULL;                     // ic must stay empty
        ic->set(new_send_code, method, klass);

      } else {
        // jump table entry found ==> switch to compiled send
        new_send_code = Bytecodes::compiled_send_code_for(send_code);
        ic->set(new_send_code, oop(result.entry()->entry_point()), klass);
      }
    } else {
      // methodOop found
      methodOop method = result.method();

      if (UseAccessMethods && Bytecodes::has_access_send_code(send_code) && method->is_accessMethod()) {
        // access method found ==> switch to access send
        new_send_code = Bytecodes::access_send_code_for(send_code);

      } else if (UsePredictedMethods && Bytecodes::has_predicted_send_code(send_code) && method->is_special_primitiveMethod()) {
        // predictable method found ==> switch to predicted send
        // NB: ic of predicted send should be empty so that the compiler knows whether another type occurred or not
        // i.e., {predicted + empty} --> 1 class, {predicted + nonempty} --> 2 klasses (polymorphic)
        // but: this actually doesn't work (yet?) since the interpreter fills the ic on any failure (e.g. overflow)
        new_send_code = method->special_primitive_code();
        method = methodOop(ic->selector()); // ic must stay empty
        klass  = NULL;                     // ic must stay empty

      } else if (UsePrimitiveMethods && method->is_primitiveMethod()) {
        // primitive method found ==> switch to primitive send
        new_send_code = Bytecodes::primitive_send_code_for(send_code);
        Unimplemented(); // take this out when all primitive send bytecodes implemented

      } else {
        // normal interpreted send ==> do not change
        new_send_code = send_code;
        assert(new_send_code == Bytecodes::original_send_code_for(send_code), "bytecode should not change");
      }
      assert(new_send_code != Bytecodes::halt, "new_send_code not set");
      ic->set(new_send_code, method, klass);
    }
  } else {
    // ic not empty
    switch (ic->send_type()) {
      // monomorphic send
      case Bytecodes::accessor_send   : // fall through
      case Bytecodes::predicted_send  : // fall through
      case Bytecodes::compiled_send   : // fall through
      case Bytecodes::interpreted_send: {
        // switch to polymorphic send with 2 entries
        objArrayOop pic = Interpreter_PICs::allocate(2);
        Interpreter_PICs::set_first(pic, ic->first_word(), ic->second_word());
        Interpreter_PICs::set_second(pic, result.value(), klass);
        ic->set(Bytecodes::polymorphic_send_code_for(send_code), ic->selector(), pic);
        break;
      }

      // polymorphic send
      case Bytecodes::polymorphic_send: {
        objArrayOop old_pic = ic->pic_array();
        objArrayOop new_pic = Interpreter_PICs::extend(old_pic); // add an entry to the PIC if appropriate
        if (new_pic == NULL) {
          // switch to megamorphic send
          if (Bytecodes::is_super_send(send_code)) {
            ic->set(Bytecodes::megamorphic_send_code_for(send_code), result.value(), klass);
          } else {
            ic->set(Bytecodes::megamorphic_send_code_for(send_code), ic->selector(), NULL);
          }
        } else {
          // still a polymorphic send, add entry and set ic to new_pic
          Interpreter_PICs::set_last(new_pic, result.value(), klass);
          ic->set(send_code, ic->selector(), new_pic);
        }
        // recycle old pic
        Interpreter_PICs::deallocate(old_pic);
        break;
      }

      // megamorphic send
      case Bytecodes::megamorphic_send: {
        if (Bytecodes::is_super_send(send_code)) {
          ic->set(send_code, result.value(), klass);
        }
        break;
      }

      default: ShouldNotReachHere();
    }
  }

  // redo send (reset instruction pointer)
  f->set_hp(ic->send_code_addr());
}
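The branches above implement the usual inline-cache lattice: an empty cache becomes monomorphic, a monomorphic miss grows a two-entry PIC, later misses extend the PIC, and when the PIC cannot grow the send collapses to megamorphic. A minimal sketch of just those transitions, with hypothetical stand-in types for the Strongtalk oops and an assumed PIC cap:

#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical stand-ins for the VM types used above.
using Klass  = const void*;
using Target = const void*;

struct InlineCache {
  enum State { empty_ic, monomorphic, polymorphic, megamorphic };
  State state = empty_ic;
  std::vector<std::pair<Klass, Target>> entries;  // the "PIC" once polymorphic
  static const std::size_t max_pic_size = 4;      // assumed cap before megamorphic

  // Mirrors update_inline_cache: fill on the first lookup, grow a 2-entry
  // PIC on the next miss, extend it on later misses, and collapse to
  // megamorphic when the PIC cannot grow any further.
  void record_lookup(Klass k, Target t) {
    switch (state) {
      case empty_ic:
        entries.assign({{k, t}});
        state = monomorphic;
        break;
      case monomorphic:
        entries.push_back({k, t});    // corresponds to Interpreter_PICs::allocate(2)
        state = polymorphic;
        break;
      case polymorphic:
        if (entries.size() < max_pic_size) {
          entries.push_back({k, t});  // Interpreter_PICs::extend succeeded
        } else {
          entries.clear();            // extend "failed": stop caching klasses
          state = megamorphic;
        }
        break;
      case megamorphic:
        break;                        // every send goes through the lookup path
    }
  }
};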
Esempio n. 25
0
bool match(klassOop klass, symbolOop selector) {
  return oop(selector) == _key.selector_or_method()
      && klass         == _key.klass();
}
template <class T>
inline void ParScanClosure::do_oop_work(T* p,
                                        bool gc_barrier,
                                        bool root_scan) {
  assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
          generation()->is_in_reserved(p))
         && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
         "The gen must be right, and we must be doing the barrier "
         "in older generations.");
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
#ifndef PRODUCT
      if (_g->to()->is_in_reserved(obj)) {
        tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Space* sp = gch->space_containing(p);
        oop obj = oop(sp->block_start(p));
        assert((HeapWord*)obj < (HeapWord*)p, "Error");
        tty->print_cr("Object: " PTR_FORMAT, p2i((void *)obj));
        tty->print_cr("-------");
        obj->print();
        tty->print_cr("-----");
        tty->print_cr("Heap:");
        tty->print_cr("-----");
        gch->print();
        ShouldNotReachHere();
      }
#endif
      // OK, we need to ensure that it is copied.
      // We read the klass and mark in this order, so that we can reliably
      // get the size of the object: if the mark we read is not a
      // forwarding pointer, then the klass is valid: the klass is only
      // overwritten with an overflow next pointer after the object is
      // forwarded.
      Klass* objK = obj->klass();
      markOop m = obj->mark();
      oop new_obj;
      if (m->is_marked()) { // Contains forwarding pointer.
        new_obj = ParNewGeneration::real_forwardee(obj);
        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
        log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                        "forwarded ",
                                        new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
      } else {
        size_t obj_sz = obj->size_given_klass(objK);
        new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
        if (root_scan) {
          // This may have pushed an object.  If we have a root
          // category with a lot of roots, can't let the queue get too
          // full:
          (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
        }
      }
      if (is_scanning_a_klass()) {
        do_klass_barrier();
      } else if (gc_barrier) {
        // Now call parent closure
        par_do_barrier(p);
      }
    }
  }
}
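The klass-then-mark load order that the comment inside do_oop_work justifies can be shown in isolation: the collector overwrites the klass word only after installing a forwarding pointer in the mark, so if the mark read second is not a forwarding pointer, the klass read first was still valid. A standalone sketch with hypothetical types (the real code additionally relies on the VM's memory-ordering guarantees, which this sketch elides):

#include <cstddef>
#include <cstdint>

struct FakeKlass { std::size_t instance_words; };

struct FakeObj {
  FakeKlass*     klass;  // overwritten by the collector only AFTER forwarding
  std::uintptr_t mark;   // low bit set == forwarding pointer installed
};

// Read klass first, mark second. If the mark we then observe is NOT a
// forwarding pointer, the klass word cannot have been overwritten yet,
// so the size we compute from it is valid.
static std::size_t safe_size(const FakeObj* obj) {
  FakeKlass*     k = obj->klass;  // 1) load klass
  std::uintptr_t m = obj->mark;   // 2) load mark
  if (m & 1) {
    // Forwarded: take the size from the forwardee, not the stale klass.
    const FakeObj* fwd = reinterpret_cast<const FakeObj*>(m & ~std::uintptr_t(1));
    return fwd->klass->instance_words;
  }
  return k->instance_words;
}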
Esempio n. 27
0
oop get_oop() {
  assert(is_oop(), "bad call");
  return oop(_ptr);
}
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}
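The sizing decision above reduces to a small piece of alignment arithmetic: round eden's used bytes plus the padded average promotion volume up to the generation alignment, then bail out if that would consume all of eden or shrink the young gen below its minimum. A standalone sketch of just that computation (function and parameter names are assumptions):

#include <cstddef>

// align_size_up as used above: round n up to a power-of-two alignment.
static size_t align_up(size_t n, size_t alignment) {
  return (n + alignment - 1) & ~(alignment - 1);
}

// Returns the number of bytes old gen would absorb, or 0 to signal "bail out".
static size_t compute_absorb_size(size_t eden_used, size_t avg_promoted,
                                  size_t eden_capacity, size_t young_capacity,
                                  size_t young_min, size_t alignment) {
  const size_t absorb = align_up(eden_used + avg_promoted, alignment);
  if (absorb >= eden_capacity)              return 0;  // must leave room in eden
  if (young_capacity - absorb < young_min)  return 0;  // respect young gen minimum
  return absorb;
}

For example, with eden_used = 40 MB, avg_promoted = 8 MB, and a 64 KB alignment, the absorb size comes out to exactly 48 MB, and both guards pass as long as eden is larger than that and the young gen keeps its minimum.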
Esempio n. 29
0
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
    const size_t word_sz = old->size();
    HeapRegion* const from_region = _g1h->heap_region_containing(old);
    // +1 to make the -1 indexes valid...
    const int young_index = from_region->young_index_in_cset()+1;
    assert( (from_region->is_young() && young_index >  0) ||
            (!from_region->is_young() && young_index == 0), "invariant" );
    const AllocationContext_t context = from_region->allocation_context();

    uint age = 0;
    InCSetState dest_state = next_state(state, old_mark, age);
    // The second clause is to prevent premature evacuation failure in case there
    // is still space in survivor, but old gen is full.
    if (_old_gen_is_full && dest_state.is_old()) {
        return handle_evacuation_failure_par(old, old_mark);
    }
    HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

    // PLAB allocations should succeed most of the time, so we'll
    // normally check against NULL once and that's it.
    if (obj_ptr == NULL) {
        bool plab_refill_failed = false;
        obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
        if (obj_ptr == NULL) {
            obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
            if (obj_ptr == NULL) {
                // This will either forward-to-self, or detect that someone else has
                // installed a forwarding pointer.
                return handle_evacuation_failure_par(old, old_mark);
            }
        }
        if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
            // The events are checked individually as part of the actual commit
            report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
        }
    }

    assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
    assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
    // Should this evacuation fail?
    if (_g1h->evacuation_should_fail()) {
        // Doing this after all the allocation attempts also tests the
        // undo_allocation() method too.
        _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
        return handle_evacuation_failure_par(old, old_mark);
    }
#endif // !PRODUCT

    // We're going to allocate linearly, so might as well prefetch ahead.
    Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

    const oop obj = oop(obj_ptr);
    const oop forward_ptr = old->forward_to_atomic(obj);
    if (forward_ptr == NULL) {
        Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

        if (dest_state.is_young()) {
            if (age < markOopDesc::max_age) {
                age++;
            }
            if (old_mark->has_displaced_mark_helper()) {
                // In this case, we have to install the mark word first,
                // otherwise obj looks to be forwarded (the old mark word,
                // which contains the forward pointer, was copied)
                obj->set_mark(old_mark);
                markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
                old_mark->set_displaced_mark_helper(new_mark);
            } else {
                obj->set_mark(old_mark->set_age(age));
            }
            _age_table.add(age, word_sz);
        } else {
            obj->set_mark(old_mark);
        }

        if (G1StringDedup::is_enabled()) {
            const bool is_from_young = state.is_young();
            const bool is_to_young = dest_state.is_young();
            assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
                   "sanity");
            assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
                   "sanity");
            G1StringDedup::enqueue_from_evacuation(is_from_young,
                                                   is_to_young,
                                                   _worker_id,
                                                   obj);
        }

        _surviving_young_words[young_index] += word_sz;

        if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
            // We keep track of the next start index in the length field of
            // the to-space object. The actual length can be found in the
            // length field of the from-space object.
            arrayOop(obj)->set_length(0);
            oop* old_p = set_partial_array_mask(old);
            push_on_queue(old_p);
        } else {
            HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
            _scanner.set_region(to_region);
            obj->oop_iterate_backwards(&_scanner);
        }
        return obj;
    } else {
        _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
        return forward_ptr;
    }
}
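Ownership of the copy is decided by the compare-and-swap inside forward_to_atomic: a NULL return means this thread installed the forwarding pointer and must finish the copy, while any other return value is the winner's forwardee and the local PLAB allocation is undone. A standalone sketch of that claim protocol with std::atomic (all types and the low-bit encoding are assumptions):

#include <atomic>
#include <cstdint>

struct Obj;  // hypothetical object type; only its address matters here

struct Header {
  std::atomic<std::uintptr_t> mark{0};
};

// Returns nullptr if WE installed new_loc as the forwardee (we won the race
// and must copy the payload); otherwise returns the forwardee installed by
// the winning thread, mirroring the NULL/forward_ptr convention above.
static Obj* forward_to_atomic(Header* h, std::uintptr_t old_mark, Obj* new_loc) {
  std::uintptr_t expected = old_mark;
  const std::uintptr_t desired =
      reinterpret_cast<std::uintptr_t>(new_loc) | 1;  // low bit: "forwarded"
  if (h->mark.compare_exchange_strong(expected, desired)) {
    return nullptr;  // we won the race
  }
  // Lost the race: expected now holds the winner's mark; decode its forwardee.
  // (In this protocol only the forwarding CAS ever changes the mark.)
  return reinterpret_cast<Obj*>(expected & ~std::uintptr_t(1));
}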