Example #1
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    SharedHeap::fill_region_with_object(_retained_filler);
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}
Example #2
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top      = _retained_filler.end();
    _hard_end = next_hard_end;
    _end      = _hard_end - AlignmentReserve;
    res       = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}
Example #3
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    cur->reset_zero_fill();
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region.  We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}
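The shrink loop above walks the region array from the top, reclaiming only empty regions and stopping as soon as it meets a live or humongous region. A minimal standalone sketch of that policy (not HotSpot code; Region and its fields are simplified stand-ins):

#include <cstddef>
#include <vector>

struct Region {
  size_t capacity;   // bytes covered by this region
  bool   empty;      // true if the region holds no live data
  bool   humongous;  // humongous regions must stay where they are
};

// Pops empty regions off the top until 'shrink_bytes' is reclaimed or a
// region that cannot be removed is reached; returns the bytes reclaimed.
static size_t shrink_by(std::vector<Region>& regions, size_t shrink_bytes) {
  size_t reclaimed = 0;
  while (!regions.empty() && reclaimed < shrink_bytes) {
    const Region& cur = regions.back();
    if (cur.humongous || !cur.empty) {
      break;  // leave it where it is and work around it
    }
    reclaimed += cur.capacity;
    regions.pop_back();
  }
  return reclaimed;
}

int main() {
  std::vector<Region> heap = {{1024, false, false}, {1024, true, false}, {1024, true, false}};
  return shrink_by(heap, 2048) == 2048 ? 0 : 1;  // the top two empty regions go away
}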
Example #4
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

    assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
           "invalid space boundaries");

    if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
        // The space may move left and right or expand/shrink.
        // We'd like to enforce the desired page placement.
        MemRegion head, tail;
        if (last_setup_region().is_empty()) {
            // If it's the first initialization don't limit the amount of work.
            head = mr;
            tail = MemRegion(mr.end(), mr.end());
        } else {
            // Is there an intersection with the address space?
            MemRegion intersection = last_setup_region().intersection(mr);
            if (intersection.is_empty()) {
                intersection = MemRegion(mr.end(), mr.end());
            }
            // All the sizes below are in words.
            size_t head_size = 0, tail_size = 0;
            if (mr.start() <= intersection.start()) {
                head_size = pointer_delta(intersection.start(), mr.start());
            }
            if (intersection.end() <= mr.end()) {
                tail_size = pointer_delta(mr.end(), intersection.end());
            }
            // Limit the amount of page manipulation if necessary.
            if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
                const size_t change_size = head_size + tail_size;
                const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
                head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                                 head_size);
                tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                                 tail_size);
            }
            head = MemRegion(intersection.start() - head_size, intersection.start());
            tail = MemRegion(intersection.end(), intersection.end() + tail_size);
        }
        assert(mr.contains(head) && mr.contains(tail), "Sanity");

        if (UseNUMA) {
            numa_setup_pages(head, clear_space);
            numa_setup_pages(tail, clear_space);
        }

        if (AlwaysPreTouch) {
            pretouch_pages(head);
            pretouch_pages(tail);
        }

        // Remember where we stopped so that we can continue later.
        set_last_setup_region(MemRegion(head.start(), tail.end()));
    }
Example #5
  void scanCard(size_t index, HeapRegion *r) {
    _cards_done++;
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                         CardTableModRefBS::Precise,
                         HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space *sp = SharedHeap::heap()->space_containing(card_start);
    MemRegion sm_region;
    if (ParallelGCThreads > 0) {
      // first find the used area
      sm_region = sp->used_region_at_save_marks();
    } else {
      // The closure is not idempotent.  We shouldn't look at objects
      // allocated during the GC.
      sm_region = sp->used_region_at_save_marks();
    }
    MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
    if (!mr.is_empty()) {
      cl->do_MemRegion(mr);
    }
  }
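The interesting arithmetic above maps a card index to a fixed-size word range and clips it to the part of the space that was in use at the save-marks point. A minimal standalone sketch (not HotSpot code; Range, kWordsPerCard and the word-index addressing are simplified stand-ins for MemRegion, G1BlockOffsetSharedArray::N_words and HeapWord*):

#include <algorithm>
#include <cstddef>
#include <cstdint>

typedef uintptr_t WordIndex;              // heap addresses measured in words
static const size_t kWordsPerCard = 64;   // stand-in for the real words-per-card constant

struct Range {
  WordIndex start, end;                   // half-open [start, end)
  bool is_empty() const { return start >= end; }
};

// Word range covered by card 'card_index' of a space starting at 'space_bottom'.
static Range card_range(WordIndex space_bottom, size_t card_index) {
  WordIndex s = space_bottom + card_index * kWordsPerCard;
  return Range{s, s + kWordsPerCard};
}

static Range intersection(Range a, Range b) {
  Range r{std::max(a.start, b.start), std::min(a.end, b.end)};
  return r.is_empty() ? Range{0, 0} : r;
}

int main() {
  Range used = {0, 100};              // words in use at save marks
  Range card = card_range(0, 1);      // second card: words [64, 128)
  Range mr   = intersection(card, used);
  return (mr.start == 64 && mr.end == 100) ? 0 : 1;  // only the used part is scanned
}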
Example #6
void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}
Example #7
ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}
Example #8
bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}
Example #9
// count is number of array elements being written
void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(count <= (size_t)max_intx, "count too large");
  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  // In the case of compressed oops, start and end may potentially be misaligned;
  // so we need to conservatively align the first downward (this is not
  // strictly necessary for current uses, but a case of good hygiene and,
  // if you will, aesthetics) and the second upward (this is essential for
  // current uses) to a HeapWord boundary, so we mark all cards overlapping
  // this write. If this evolves in the future to calling a
  // logging barrier of narrow oop granularity, like the pre-barrier for G1
  // (mentioned here merely by way of example), we will need to change this
  // interface, so it is "exactly precise" (if I may be allowed the adverbial
  // redundancy for emphasis) and does not include narrow oop slots not
  // included in the original write interval.
  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
  // If compressed oops were not being used, these should already be aligned
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
#if 0
  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
                   start,            count,              aligned_start,   aligned_end);
#endif
  write_ref_array_work(MemRegion(aligned_start, aligned_end));
}
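The comment above boils down to widening a possibly misaligned byte range to heap-word boundaries: the start is aligned down and the end is aligned up, so every card the write touched gets marked. A minimal standalone sketch of that arithmetic (not HotSpot code; align_down/align_up and the hard-coded 8-byte word are simplified stand-ins for align_size_down/align_size_up and HeapWordSize):

#include <cstdint>
#include <cstdio>

static const uintptr_t kWordSize = 8;  // assume a 64-bit HeapWord

static uintptr_t align_down(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }
static uintptr_t align_up  (uintptr_t p, uintptr_t a) { return (p + a - 1) & ~(a - 1); }

int main() {
  uintptr_t start = 0x1004;             // misaligned narrow-oop slot
  uintptr_t end   = start + 2 * 4;      // two 4-byte compressed oops -> 0x100c
  uintptr_t aligned_start = align_down(start, kWordSize);  // 0x1000
  uintptr_t aligned_end   = align_up  (end,   kWordSize);  // 0x1010
  std::printf("[%#lx, %#lx) widened to [%#lx, %#lx)\n",
              (unsigned long)start, (unsigned long)end,
              (unsigned long)aligned_start, (unsigned long)aligned_end);
  return 0;
}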
Example #10
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  CardGeneration(rs, initial_byte_size, level, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}
Example #11
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               bool clear,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism.  (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte*    chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    non_clean_card_iterate_work(chunk_mr, cl, clear);

    // Find the next chunk of the stride.
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}
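The branch on stride versus start_chunk_stride_num above is just modular arithmetic: chunk k of the used region belongs to stride (start_chunk + k) % n_strides, so each worker finds its first chunk and then advances by n_strides chunks at a time. A minimal standalone sketch of that mapping (not HotSpot code; the function name and the example numbers are made up for illustration):

#include <cstdio>

// Index (relative to the first chunk of the used region) of the first chunk
// handled by 'stride', mirroring the two branches in process_stride().
static unsigned first_chunk_for_stride(unsigned start_chunk, unsigned stride, unsigned n_strides) {
  unsigned start_stride = start_chunk % n_strides;
  return (stride >= start_stride) ? (stride - start_stride)
                                  : (n_strides - start_stride + stride);
}

int main() {
  // With 4 strides and a used region whose first chunk index is 6 (6 % 4 == 2):
  // stride 2 starts at relative chunk 0, stride 3 at 1, stride 0 at 2, stride 1 at 3.
  for (unsigned s = 0; s < 4; ++s) {
    std::printf("stride %u -> first relative chunk %u\n", s, first_chunk_for_stride(6, s, 4));
  }
  return 0;
}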
Example #12
void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}
Example #13
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If object header crossed a small page boundary we mark the area
              // as invalid rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
          return;
      }
    }
  }
}
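The crossing_start != crossing_end test above detects whether the filler's header straddles a small-page boundary by rounding both ends of the touched range up to the page size. A minimal standalone sketch of that test (not HotSpot code; round_up and the byte-based arithmetic are simplified stand-ins for round_to and the word arithmetic used above):

#include <cstdint>
#include <cstdio>

static uintptr_t round_up(uintptr_t p, uintptr_t a) { return (p + a - 1) & ~(a - 1); }

int main() {
  const uintptr_t page = 4096;
  uintptr_t top = 4090, touched_bytes = 24;   // the touched area straddles a page boundary
  bool crosses = round_up(top, page) != round_up(top + touched_bytes, page);
  std::printf("crosses page boundary: %s\n", crosses ? "yes" : "no");
  return 0;
}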
Example #14
 // Support for MT garbage collection. CAS allocation is lower overhead than grabbing
 // and releasing the heap lock, which is held during gc's anyway. This method is not
 // safe for use at the same time as allocate_noexpand()!
 HeapWord* cas_allocate_noexpand(size_t word_size) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint");
   HeapWord* res = object_space()->cas_allocate(word_size);
   if (res != NULL) {
     DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
     _start_array.allocate_block(res);
   }
   return res;
 }
Example #15
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}
Example #16
inline void CMSBitMap::par_markRange(MemRegion mr) {
  assert_locked();
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  size_t end_ofs = heapWordToOffset(mr.end());
  // Range size is usually just 1 bit.
  _bm.par_set_range(start_ofs, end_ofs, BitMap::small_range);
}
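par_markRange() converts an address range into a bit-offset range before setting it. A minimal standalone sketch of that conversion (not HotSpot code; it assumes one mark bit per heap word, and WordBitMap and its fields are simplified stand-ins for CMSBitMap):

#include <cstddef>
#include <vector>

struct WordBitMap {
  size_t bm_start_word;      // first heap word covered by the bitmap
  std::vector<bool> bits;    // one bit per covered word

  void mark_range(size_t range_start_word, size_t range_end_word) {
    size_t start_ofs = range_start_word - bm_start_word;  // cf. heapWordToOffset()
    size_t end_ofs   = range_end_word   - bm_start_word;
    for (size_t i = start_ofs; i < end_ofs; ++i) bits[i] = true;
  }
};

int main() {
  WordBitMap bm{1000, std::vector<bool>(64, false)};
  bm.mark_range(1002, 1003);          // "range size is usually just 1 bit"
  return bm.bits[2] ? 0 : 1;
}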
Example #17
 HeapWord* allocate_noexpand(size_t word_size) {
   // We assume the heap lock is held here.
   assert_locked_or_safepoint(Heap_lock);
   HeapWord* res = object_space()->allocate(word_size);
   if (res != NULL) {
     DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
     _start_array.allocate_block(res);
   }
   return res;
 }
Example #18
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
                                   HeapWord* new_end,
                                   FreeRegionList* list) {
  assert(old_end < new_end, "don't call it otherwise");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  HeapWord* next_bottom = old_end;
  assert(heap_bottom() <= next_bottom, "invariant");
  while (next_bottom < new_end) {
    assert(next_bottom < heap_end(), "invariant");
    uint index = length();

    assert(index < max_length(), "otherwise we cannot expand further");
    if (index == 0) {
      // We have not allocated any regions so far
      assert(next_bottom == heap_bottom(), "invariant");
    } else {
      // next_bottom should match the end of the last/previous region
      assert(next_bottom == at(index - 1)->end(), "invariant");
    }

    if (index == _allocated_length) {
      // We have to allocate a new HeapRegion.
      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
      if (new_hr == NULL) {
        // allocation failed, we bail out and return what we have done so far
        return MemRegion(old_end, next_bottom);
      }
      assert(_regions.get_by_index(index) == NULL, "invariant");
      _regions.set_by_index(index, new_hr);
      increment_allocated_length();
    }
    // Have to increment the length first, otherwise we will get an
    // assert failure at(index) below.
    increment_length();
    HeapRegion* hr = at(index);
    list->add_as_tail(hr);

    next_bottom = hr->end();
  }
  assert(next_bottom == new_end, "post-condition");
  return MemRegion(old_end, next_bottom);
}
Example #19
 void free_humongous_region(HeapRegion* hr) {
   HeapWord* bot = hr->bottom();
   HeapWord* end = hr->end();
   assert(hr->startsHumongous(),
          "Only the start of a humongous region should be freed.");
   G1CollectedHeap::heap()->free_region(hr);
   hr->prepare_for_compaction(&_cp);
   // Also clear the part of the card table that will be unused after
   // compaction.
   _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
 }
Example #20
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}
Example #21
size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  HeapWord* current_top = top();
  assert(p <= current_top, "p is not a block start");
  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
  if (p < current_top)
    return oop(p)->size();
  else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}
Example #22
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        size_t area_touched_words = pointer_delta(s->end(), s->top());
        CollectedHeap::fill_with_object(s->top(), area_touched_words);
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If object header crossed a small page boundary we mark the area
            // as invalid rounding it to a page_size().
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }

          ls->add_invalid_region(invalid);
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
          return;
      }
    }
  }
}
Example #23
    void free_humongous_region(HeapRegion* hr) {
        HeapWord* end = hr->end();
        size_t dummy_pre_used;
        FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

        assert(hr->startsHumongous(),
               "Only the start of a humongous region should be freed.");
        _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
                                    &_humongous_proxy_set, false /* par */);
        hr->prepare_for_compaction(&_cp);
        // Also clear the part of the card table that will be unused after
        // compaction.
        _mrbs->clear(MemRegion(hr->compaction_top(), end));
        dummy_free_list.remove_all();
    }
Example #24
// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), "Should be an object start");
    return last;
  }
}
Example #25
MemRegion MemRegion::_union(const MemRegion mr2) const {
  // If one region is empty, return the other
  if (is_empty()) return mr2;
  if (mr2.is_empty()) return MemRegion(start(), end());

  // Otherwise, regions must overlap or be adjacent
  assert(((start() <= mr2.start()) && (end() >= mr2.start())) ||
         ((mr2.start() <= start()) && (mr2.end() >= start())),
             "non-adjacent or overlapping regions");
  MemRegion res;
  HeapWord* res_start = MIN2(start(), mr2.start());
  HeapWord* res_end   = MAX2(end(),   mr2.end());
  res.set_start(res_start);
  res.set_end(res_end);
  return res;
}
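A minimal standalone sketch of the union computed above (not HotSpot code; Region is a simplified stand-in for MemRegion): for two overlapping or adjacent half-open ranges the result runs from the lower start to the higher end, and an empty argument simply yields the other region.

#include <algorithm>
#include <cassert>
#include <cstdint>

struct Region {
  uintptr_t start, end;   // half-open [start, end)
  bool is_empty() const { return start >= end; }
};

static Region region_union(Region a, Region b) {
  if (a.is_empty()) return b;
  if (b.is_empty()) return a;
  // As in MemRegion::_union, callers may only pass overlapping or adjacent regions.
  assert(std::max(a.start, b.start) <= std::min(a.end, b.end));
  return Region{std::min(a.start, b.start), std::max(a.end, b.end)};
}

int main() {
  Region r = region_union(Region{0x1000, 0x2000}, Region{0x2000, 0x3000});
  return (r.start == 0x1000 && r.end == 0x3000) ? 0 : 1;
}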
Example #26
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
    _level(level),
    _ref_processor(NULL) {
    if (!_virtual_space.initialize(rs, initial_size)) {
        vm_exit_during_initialization("Could not reserve enough space for "
                                      "object heap");
    }
    // Mangle all of the initial generation.
    if (ZapUnusedHeapArea) {
        MemRegion mangle_region((HeapWord*)_virtual_space.low(),
                                (HeapWord*)_virtual_space.high());
        SpaceMangler::mangle_region(mangle_region);
    }
    _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                          (HeapWord*)_virtual_space.high_boundary());
}
Example #27
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  OneContigSpaceCardGeneration(rs, initial_byte_size,
                               MinHeapDeltaBytes, level, remset, NULL)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
#ifndef SERIALGC
  if (UseParNewGC && ParallelGCThreads > 0) {
    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
                                      ParallelGCThreads);
    if (_alloc_buffers == NULL)
      vm_exit_during_initialization("Could not allocate alloc_buffers");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i] =
        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
      if (_alloc_buffers[i] == NULL)
        vm_exit_during_initialization("Could not allocate alloc_buffers");
    }
  } else {
    _alloc_buffers = NULL;
  }
#endif // SERIALGC
}
Example #28
File: space.cpp Project: dain/graal
// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}
Example #29
File: space.cpp Project: dain/graal
size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                  p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}
Example #30
CompactingPermGen::CompactingPermGen(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     GenRemSet* remset)
{
  CompactingPermGenGen* g =
    new CompactingPermGenGen(rs, initial_byte_size, -1, remset, NULL);
  if (g == NULL)
    vm_exit_during_initialization("Could not allocate a CompactingPermGen");
  
  HeapWord* bottom = (HeapWord*) g->_virtual_space.low();
  HeapWord* end    = (HeapWord*) g->_virtual_space.high();
  g->_the_space    = new ContigPermSpace(g->_bts, MemRegion(bottom, end));
  if (g->_the_space == NULL)
    vm_exit_during_initialization("Could not allocate a CompactingPermGen Space");
  _gen = g;

  g->initialize_performance_counters();

  _capacity_expansion_limit = g->capacity() + MaxPermHeapExpansion;
}