Example No. 1
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  const MemRegion urasm = sp->used_region_at_save_marks();
#ifdef ASSERT
  // Convert the assertion check to a warning if we are running
  // CMS+ParNew until related bug is fixed.
  MemRegion ur    = sp->used_region();
  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
         err_msg("Did you forget to call save_marks()? "
                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                 urasm.start(), urasm.end(), ur.start(), ur.end()));
  // In the case of CMS+ParNew, issue a warning
  if (!ur.contains(urasm)) {
    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
    warning("CMS+ParNew: Did you forget to call save_marks()? "
            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
            "[" PTR_FORMAT ", " PTR_FORMAT ")",
             urasm.start(), urasm.end(), ur.start(), ur.end());
    MemRegion ur2 = sp->used_region();
    MemRegion urasm2 = sp->used_region_at_save_marks();
    if (!ur.equals(ur2)) {
      warning("CMS+ParNew: Flickering used_region()!!");
    }
    if (!urasm.equals(urasm2)) {
      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
    }
    ShouldNotReachHere();
  }
#endif
  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
}
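The assertion in Example No. 1 depends on MemRegion behaving as a half-open interval of heap addresses with region-in-region containment. The following is a minimal, self-contained sketch of that containment semantics; SimpleMemRegion and its fields are illustrative stand-ins, not the HotSpot MemRegion class.
#include <cassert>

// Simplified stand-in for a half-open region [start, end); illustrative only.
struct SimpleMemRegion {
  const char* _start;
  const char* _end;                                  // one past the last address

  bool contains(const void* addr) const {
    const char* p = static_cast<const char*>(addr);
    return p >= _start && p < _end;
  }
  bool contains(const SimpleMemRegion& r) const {
    // An empty region is trivially contained.
    return r._start == r._end || (r._start >= _start && r._end <= _end);
  }
};

int main() {
  char heap[64];
  SimpleMemRegion used   = { heap, heap + 64 };      // e.g. sp->used_region()
  SimpleMemRegion marked = { heap, heap + 32 };      // e.g. used_region_at_save_marks()
  assert(used.contains(marked));                     // the invariant asserted above
  assert(used.contains(heap + 10));
  assert(!used.contains(heap + 64));                 // the end address is exclusive
  return 0;
}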
Example No. 2
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}
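The recurring idiom in these *_m iterators is to take the address of each embedded oop slot and hand it to the closure only if that address lies inside mr. Below is a hedged sketch of that filter with toy types; ToyOopClosure and visit_if_in_region are made-up names, not the HotSpot OopClosure API.
#include <cstdio>
#include <cstdint>

typedef void* toy_oop;                               // illustrative oop stand-in

struct ToyOopClosure {                               // illustrative closure interface
  virtual void do_oop(toy_oop* p) = 0;
  virtual ~ToyOopClosure() {}
};

struct ToyMemRegion {                                // half-open [start, end) by address
  std::uintptr_t _start, _end;
  bool contains(const void* p) const {
    std::uintptr_t a = reinterpret_cast<std::uintptr_t>(p);
    return a >= _start && a < _end;
  }
};

// Mirrors "if (mr.contains(adr)) blk->do_oop(adr);" from the examples above.
inline void visit_if_in_region(const ToyMemRegion& mr, ToyOopClosure* blk, toy_oop* slot) {
  if (mr.contains(slot)) blk->do_oop(slot);
}

struct CountingClosure : ToyOopClosure {
  int visited = 0;
  void do_oop(toy_oop*) override { ++visited; }
};

int main() {
  toy_oop fields[4] = {};
  ToyMemRegion mr = { reinterpret_cast<std::uintptr_t>(&fields[0]),
                      reinterpret_cast<std::uintptr_t>(&fields[2]) };   // covers slots 0-1
  CountingClosure cc;
  for (toy_oop& f : fields) visit_if_in_region(mr, &cc, &f);
  std::printf("visited %d of 4 slots\n", cc.visited);                   // prints 2
  return 0;
}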
Example No. 3
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

    assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
           "invalid space boundaries");

    if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
        // The space may move left and right or expand/shrink.
        // We'd like to enforce the desired page placement.
        MemRegion head, tail;
        if (last_setup_region().is_empty()) {
            // If it's the first initialization don't limit the amount of work.
            head = mr;
            tail = MemRegion(mr.end(), mr.end());
        } else {
            // Is there an intersection with the address space?
            MemRegion intersection = last_setup_region().intersection(mr);
            if (intersection.is_empty()) {
                intersection = MemRegion(mr.end(), mr.end());
            }
            // All the sizes below are in words.
            size_t head_size = 0, tail_size = 0;
            if (mr.start() <= intersection.start()) {
                head_size = pointer_delta(intersection.start(), mr.start());
            }
            if(intersection.end() <= mr.end()) {
                tail_size = pointer_delta(mr.end(), intersection.end());
            }
            // Limit the amount of page manipulation if necessary.
            if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
                const size_t change_size = head_size + tail_size;
                const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
                head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                                 head_size);
                tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                                 tail_size);
            }
            head = MemRegion(intersection.start() - head_size, intersection.start());
            tail = MemRegion(intersection.end(), intersection.end() + tail_size);
        }
        assert(mr.contains(head) && mr.contains(tail), "Sanity");

        if (UseNUMA) {
            numa_setup_pages(head, clear_space);
            numa_setup_pages(tail, clear_space);
        }

        if (AlwaysPreTouch) {
            pretouch_pages(head);
            pretouch_pages(tail);
        }

        // Remember where we stopped so that we can continue later.
        set_last_setup_region(MemRegion(head.start(), tail.end()));
    }
    // remainder of MutableSpace::initialize omitted in this excerpt
}
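The NUMASpaceResizeRate branch above caps the amount of page setup done per resize by splitting a word budget between the head and tail regions in proportion to their sizes. A small standalone sketch of that proportional throttling follows; limit_setup_sizes and the constants are assumptions chosen for illustration, and integer arithmetic replaces the original float math.
#include <algorithm>
#include <cstdio>
#include <cstddef>

// Split a setup budget (in words) between head and tail proportionally,
// never exceeding either side's actual size. Simplified model of the
// throttling in MutableSpace::initialize above.
static void limit_setup_sizes(std::size_t budget_words,
                              std::size_t& head_words,
                              std::size_t& tail_words) {
  const std::size_t change = head_words + tail_words;
  if (change == 0 || budget_words >= change) return;       // nothing to trim
  head_words = std::min(budget_words * head_words / change, head_words);
  tail_words = std::min(budget_words * tail_words / change, tail_words);
}

int main() {
  const std::size_t log_bytes_per_word = 3;                // assuming 8-byte words
  const std::size_t resize_rate_bytes  = 32 * 1024;        // example budget per resize
  const std::size_t budget_words = resize_rate_bytes >> log_bytes_per_word;

  std::size_t head = 2000, tail = 6000;                    // words needing page setup
  limit_setup_sizes(budget_words, head, tail);
  std::printf("head=%zu tail=%zu budget=%zu\n", head, tail, budget_words);   // 1024 3072 4096
  return 0;
}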
Example No. 4
inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}
Example No. 5
int klassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
  // Get size before changing pointers
  int size = oop_size(obj);
  Klass* k = Klass::cast(klassOop(obj));
  oop* adr;
  adr = k->adr_super();
  if (mr.contains(adr)) blk->do_oop(adr);
  for (juint i = 0; i < Klass::primary_super_limit(); i++) {
    adr = k->adr_primary_supers()+i;
    if (mr.contains(adr)) blk->do_oop(adr);
  }
  adr = k->adr_secondary_super_cache();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = k->adr_secondary_supers();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = k->adr_java_mirror();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = k->adr_name();
  if (mr.contains(adr)) blk->do_oop(adr);
  // The following are "weak links" in the perm gen and are
  // treated specially in a later phase of a perm gen collection.
  assert(oop(k)->is_perm(), "should be in perm");
  assert(oop(k->adr_subklass())->is_perm(), "should be in perm");
  assert(oop(k->adr_next_sibling())->is_perm(), "should be in perm");
  if (blk->should_remember_klasses()
      && (mr.contains(k->adr_subklass())
          || mr.contains(k->adr_next_sibling()))) {
    blk->remember_klass(k);
  }
  obj->oop_iterate_header(blk, mr);
  return size;
}
Example No. 6
int constantPoolKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
  assert (obj->is_constantPool(), "obj must be constant pool");
  // Performance tweak: We skip iterating over the klass pointer since we
  // know that Universe::constantPoolKlassObj never moves.
  constantPoolOop cp = (constantPoolOop) obj;
  // Get size before changing pointers.
  // Don't call size() or oop_size() since that is a virtual call.
  int size = cp->object_size();

  // If the tags array is null we are in the middle of allocating this constant
  // pool.
  if (cp->tags() != NULL) {
    oop* base = (oop*)cp->base();
    for (int i = 0; i < cp->length(); i++) {
      if (mr.contains(base)) {
        if (cp->is_pointer_entry(i)) {
          blk->do_oop(base);
        }
      }
      base++;
    }
  }
  oop* addr;
  addr = cp->tags_addr();
  blk->do_oop(addr);
  addr = cp->cache_addr();
  blk->do_oop(addr);
  addr = cp->pool_holder_addr();
  blk->do_oop(addr);
  return size;
}
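Example No. 6 applies two filters before calling the closure: the slot address must lie in mr and the corresponding tag must mark the entry as a pointer entry. A self-contained sketch of that tag-plus-region filter follows; the ToyTag values and helper name are illustrative, not the constant-pool tag encoding.
#include <cstdio>

enum ToyTag { TAG_INT = 0, TAG_POINTER = 1 };        // illustrative tag values

// Visit pointer-tagged slots whose addresses fall in [lo, hi), mirroring the
// mr.contains(base) / cp->is_pointer_entry(i) pair in the example above.
static int visit_pointer_entries(void** slots, const ToyTag* tags, int n,
                                 void** lo, void** hi) {
  int visited = 0;
  void** base = slots;
  for (int i = 0; i < n; ++i, ++base) {
    if (base >= lo && base < hi && tags[i] == TAG_POINTER) {
      ++visited;                                     // stands in for blk->do_oop(base)
    }
  }
  return visited;
}

int main() {
  void*  slots[6] = {};
  ToyTag tags[6]  = { TAG_POINTER, TAG_INT, TAG_POINTER,
                      TAG_POINTER, TAG_INT, TAG_POINTER };
  int v = visit_pointer_entries(slots, tags, 6, &slots[1], &slots[5]);   // region = slots 1..4
  std::printf("visited %d pointer entries in region\n", v);              // prints 2
  return 0;
}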
Example No. 7
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               bool clear,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism.  (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte*    chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    non_clean_card_iterate_work(chunk_mr, cl, clear);

    // Find the next chunk of the stride.
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}
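The stride selection at the top of process_stride maps a worker's stride number to the first chunk-aligned card it owns: start within the current chunk group when the requested stride is at or past the starting chunk's stride, otherwise wrap to the next group boundary. A standalone sketch of that index arithmetic follows; the function name and constants are illustrative, not card-table internals.
#include <cassert>
#include <cstdio>

// Offset (in cards) from the first card of "used" to the first chunk owned by
// "stride", given the stride number of the starting chunk. Mirrors the branch
// in process_stride() above.
static long first_chunk_card_offset(long stride,
                                    long n_strides,
                                    long start_chunk_stride_num,
                                    long cards_per_stride_chunk) {
  if (stride >= start_chunk_stride_num) {
    return (stride - start_chunk_stride_num) * cards_per_stride_chunk;
  }
  // Go ahead to the next chunk group boundary, then to the requested stride.
  return (n_strides - start_chunk_stride_num + stride) * cards_per_stride_chunk;
}

int main() {
  const long cards_per_chunk = 256;                  // stands in for CardsPerStrideChunk
  const long n_strides = 4;
  for (long stride = 0; stride < n_strides; ++stride) {
    long off = first_chunk_card_offset(stride, n_strides, /*start stride*/ 2, cards_per_chunk);
    std::printf("stride %ld starts %ld cards into the used region\n", stride, off);
  }
  // The stride that owns the starting chunk begins at offset zero.
  assert(first_chunk_card_offset(2, n_strides, 2, cards_per_chunk) == 0);
  return 0;
}
Successive chunks of the same stride then advance by n_strides * cards_per_stride_chunk cards, matching the increment at the bottom of the loop above.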
Example No. 8
int compiledICHolderKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
                                              MemRegion mr) {
  assert(obj->is_compiledICHolder(), "must be compiledICHolder");
  compiledICHolderOop c = compiledICHolderOop(obj);
  // Get size before changing pointers.
  // Don't call size() or oop_size() since that is a virtual call.
  int size = c->object_size();

  obj->oop_iterate_header(blk, mr);

  oop* adr;
  adr = c->adr_holder_method();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = c->adr_holder_klass();
  if (mr.contains(adr)) blk->do_oop(adr);
  return size;
}
Example No. 9
int constMethodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
  assert (obj->is_constMethod(), "object must be constMethod");
  constMethodOop cm = constMethodOop(obj);
  oop* adr;
  adr = cm->adr_method();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = cm->adr_stackmap_data();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = cm->adr_exception_table();
  if (mr.contains(adr)) blk->do_oop(adr);
  // Get size before changing pointers.
  // Don't call size() or oop_size() since that is a virtual call.
  int size = cm->object_size();
  // Performance tweak: We skip iterating over the klass pointer since we
  // know that Universe::constMethodKlassObj never moves.
  return size;
}
Example No. 10
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }

  return false;
}
Example No. 11
void VirtualCallData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      oop* adr = adr_receiver(row);
      if (mr.contains(adr)) {
        blk->do_oop(adr);
      }
    }
  }
}  
Example No. 12
int symbolKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
  assert(obj->is_symbol(), "object must be symbol");
  symbolOop s = symbolOop(obj);
  // Get size before changing pointers.
  // Don't call size() or oop_size() since that is a virtual call.
  int size = s->object_size();
  // Performance tweak: We skip iterating over the klass pointer since we 
  // know that Universe::symbolKlassObj never moves.
  oop* adr = (oop*) s->next_addr();
  if (mr.contains(adr)) blk->do_oop(adr);
  return size;
}
Example No. 13
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }

  return false;
}
Example No. 14
bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  assert(_start_array->covered_region().contains(lab), "Sanity");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();

  if (used.contains(lab)) {
    return true;
  }

  return false;
}
Example No. 15
void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  // Currently, this interface is called only during card-scanning for
  // a young gen gc, in which case this object cannot contribute anything,
  // since it does not contain any references that cross out of
  // the perm gen. However, for future more general use we allow
  // the possibility of calling for instance from more general
  // iterators (for example, a future regionalized perm gen for G1,
  // or the possibility of moving some references out of perm in
  // the case of other collectors). In that case, you will need
  // to relax or remove some of the assertions below.
#ifdef ASSERT
  // Verify that none of the embedded oop references cross out of
  // this generation.
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      oop* adr = adr_receiver(row);
      CollectedHeap* h = Universe::heap();
      assert(h->is_permanent(adr) && h->is_permanent_or_null(*adr), "Not intra-perm");
    }
  }
#endif // ASSERT
  assert(!blk->should_remember_mdo(), "Not expected to remember MDO");
  return;   // Nothing to do, see comment above
#if 0
  if (blk->should_remember_mdo()) {
    // This is a set of weak references that need
    // to be followed at the end of the strong marking
    // phase. Memoize this object so it can be visited
    // in the weak roots processing phase.
    blk->remember_mdo(data());
  } else { // normal scan
    for (uint row = 0; row < row_limit(); row++) {
      if (receiver(row) != NULL) {
        oop* adr = adr_receiver(row);
        if (mr.contains(adr)) {
          blk->do_oop(adr);
        } else if ((HeapWord*)adr >= mr.end()) {
          // Test that the current cursor and the two ends of the range
          // that we may have skipped iterating over are monotonically ordered;
          // this is just a paranoid assertion, just in case representations
          // should change in the future rendering the short-circuit return
          // here invalid.
          assert((row+1 >= row_limit() || adr_receiver(row+1) > adr) &&
                 (row+2 >= row_limit() || adr_receiver(row_limit()-1) > adr_receiver(row+1)), "Reducing?");
          break; // remaining should be outside this mr too
        }
      }
    }
  }
#endif
}
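The disabled scan at the end of Example No. 15 breaks out as soon as a row's slot address reaches mr.end(), which is only safe because the slot addresses grow monotonically with the row index (the property the paranoid assertion checks). A minimal sketch of that early-exit pattern over a sorted slot array follows; the layout and names are illustrative.
#include <cstdio>

// Visit slots that fall in [lo, hi); stop at the first slot at or past hi.
// Correct only because slot addresses are visited in increasing order.
static int scan_sorted_slots(void** slots, int n, void** lo, void** hi) {
  int visited = 0;
  for (int i = 0; i < n; ++i) {
    void** adr = &slots[i];
    if (adr >= lo && adr < hi) {
      ++visited;                                     // stands in for blk->do_oop(adr)
    } else if (adr >= hi) {
      break;                                         // the remaining slots are outside too
    }
  }
  return visited;
}

int main() {
  void* slots[8] = {};
  int v = scan_sorted_slots(slots, 8, &slots[2], &slots[5]);   // region covers slots 2..4
  std::printf("visited %d slots\n", v);                        // prints 3
  return 0;
}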
Example No. 16
int constantPoolCacheKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
  assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
  constantPoolCacheOop cache = (constantPoolCacheOop)obj;
  // Get size before changing pointers.
  // Don't call size() or oop_size() since that is a virtual call.
  int size = cache->object_size();
  // Performance tweak: We skip iterating over the klass pointer since we
  // know that Universe::constantPoolCacheKlassObj never moves.
  // iteration over constant pool cache instance variables
  oop* addr = (oop*)cache->constant_pool_addr();
  if (mr.contains(addr)) blk->do_oop(addr);
  // iteration over constant pool cache entries
  for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->oop_iterate_m(blk, mr);
  return size;
}
Example No. 17
int methodDataKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
  assert (obj->is_methodData(), "object must be method data");
  methodDataOop m = methodDataOop(obj);
  // Get size before changing pointers
  // Don't call size() or oop_size() since that is a virtual call.
  int size = m->object_size();

  obj->oop_iterate_header(blk, mr);
  oop* adr = m->adr_method();
  if (mr.contains(adr)) {
    blk->do_oop(m->adr_method());
  }
  ResourceMark rm;
  for (ProfileData* data = m->first_data(); 
       m->is_valid(data);
       data = m->next_data(data)) {
    data->oop_iterate_m(blk, mr);
  }
  return size;
}
Example No. 18
  void do_MemRegion(MemRegion mr) {
    HeapWord* end_of_non_clean = mr.end();
    HeapWord* start_of_non_clean = end_of_non_clean;
    jbyte* entry = _ct->byte_for(mr.last());
    HeapWord* cur = _ct->addr_for(entry);
    while (mr.contains(cur)) {
      jbyte entry_val = *entry;
      if (!clear_card(entry)) {
        if (start_of_non_clean < end_of_non_clean) {
          MemRegion mr2(start_of_non_clean, end_of_non_clean);
          _dirty_card_closure->do_MemRegion(mr2);
        }
        end_of_non_clean = cur;
        start_of_non_clean = end_of_non_clean;
      }
      entry--;
      start_of_non_clean = cur;
      cur = _ct->addr_for(entry);
    }
    if (start_of_non_clean < end_of_non_clean) {
      MemRegion mr2(start_of_non_clean, end_of_non_clean);
      _dirty_card_closure->do_MemRegion(mr2);
    }
  }
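Example No. 18 walks the card table backwards over mr, growing a dirty window leftwards and flushing it as a single MemRegion whenever a card that cannot be cleared is hit. The following is a simplified model of that coalescing over a plain boolean card array; the types and callback are assumptions, not the card-table API.
#include <cstdio>
#include <cstddef>
#include <vector>
#include <utility>

// Walk cards from high index to low, coalescing runs of dirty cards into
// maximal [start, end) index ranges, reported in descending address order.
static void coalesce_dirty_ranges(const bool* dirty, int num_cards,
                                  std::vector<std::pair<int, int> >& ranges) {
  int end_of_run   = num_cards;                      // exclusive end of the current window
  int start_of_run = end_of_run;
  for (int i = num_cards - 1; i >= 0; --i) {
    if (dirty[i]) {
      start_of_run = i;                              // extend the window one card left
    } else {
      if (start_of_run < end_of_run) {
        ranges.push_back(std::make_pair(start_of_run, end_of_run));   // flush the window
      }
      end_of_run   = i;                              // reset below the clean card
      start_of_run = i;
    }
  }
  if (start_of_run < end_of_run) {
    ranges.push_back(std::make_pair(start_of_run, end_of_run));       // trailing window
  }
}

int main() {
  const bool cards[10] = { false, true, true, false, true,
                           false, false, true, true, true };
  std::vector<std::pair<int, int> > out;
  coalesce_dirty_ranges(cards, 10, out);
  for (std::size_t i = 0; i < out.size(); ++i) {
    std::printf("[%d, %d)\n", out[i].first, out[i].second);           // [7,10) [4,5) [1,3)
  }
  return 0;
}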
Example No. 19
void InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) {
  if (method() != NULL && mr.contains(&_method)) {
    blk->do_oop((oop*) &_method);
  }
}
Example No. 20
inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (mr.contains(&_klass)) blk->do_oop((oop*)&_klass);
}
Example No. 21
int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
                                          MemRegion mr) {
  assert(obj->is_klass(),"must be a klass");
  assert(klassOop(obj)->klass_part()->oop_is_instance(), "must be instance klass");
  instanceKlass* ik = instanceKlass::cast(klassOop(obj));
  // Get size before changing pointers.
  // Don't call size() or oop_size() since that is a virtual call.
  int size = ik->object_size();

  ik->iterate_static_fields(blk, mr);
  ik->vtable()->oop_oop_iterate_m(blk, mr);
  ik->itable()->oop_oop_iterate_m(blk, mr);

  oop* adr;
  adr = ik->adr_array_klasses();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_methods();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_method_ordering();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_local_interfaces();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_transitive_interfaces();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_fields();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_constants();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_class_loader();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_protection_domain();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_signers();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_source_file_name();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_source_debug_extension();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_inner_classes();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_implementor();
  if (mr.contains(adr)) blk->do_oop(adr);
  adr = ik->adr_previous_version();
  if (mr.contains(adr)) blk->do_oop(adr);

  klassKlass::oop_oop_iterate_m(obj, blk, mr);

  if(ik->oop_map_cache() != NULL) ik->oop_map_cache()->oop_iterate(blk, mr);
  return size;
}
Example No. 22
 void do_oop(objectRef* p) {
   if (mr.contains(p)) {
     cl->do_oop(p);
   }
 }
Example No. 23
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We go from higher to lower addresses here; it wouldn't help that much
  // because of the strided parallelism pattern used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte*    chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary());
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);


    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We do not call the non_clean_card_iterate_serial() version because
    // we want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}