inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
      err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
              "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
              "addr: " PTR_FORMAT,
              p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // Old regions' dead objects may have dead classes.
  // We need to find the next live object by some means
  // other than reading the oop's size.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}
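
Every snippet in this collection leans on the same small helper. For reference, here is a minimal sketch of it, closely following the definition in HotSpot's globalDefinitions.hpp (the exact overload set varies across JDK versions, so treat this as an approximation rather than the authoritative source):

inline size_t pointer_delta(const void* left, const void* right,
                            size_t element_size) {
  // Distance between two pointers, expressed in units of element_size bytes.
  // Callers are expected to guarantee left >= right.
  return (((uintptr_t) left) - ((uintptr_t) right)) / element_size;
}

// Convenience overload for HeapWord* operands: the result is in heap words,
// which is why two-argument calls such as pointer_delta(end(), addr) above
// yield word-sized deltas.
inline size_t pointer_delta(const HeapWord* left, const HeapWord* right) {
  return pointer_delta(left, right, sizeof(HeapWord));
}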
Example #2
int
constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
                                       HeapWord* beg_addr, HeapWord* end_addr) {
  assert (obj->is_constantPool(), "obj must be constant pool");
  constantPoolOop cp = (constantPoolOop) obj;

  // If the tags array is null we are in the middle of allocating this constant
  // pool.
  if (cp->tags() != NULL) {
    oop* base = (oop*)cp->base();
    oop* const beg_oop = MAX2((oop*)beg_addr, base);
    oop* const end_oop = MIN2((oop*)end_addr, base + cp->length());
    const size_t beg_idx = pointer_delta(beg_oop, base, sizeof(oop*));
    const size_t end_idx = pointer_delta(end_oop, base, sizeof(oop*));
    for (size_t cur_idx = beg_idx; cur_idx < end_idx; ++cur_idx, ++base) {
      if (cp->is_pointer_entry(int(cur_idx))) {
        PSParallelCompact::adjust_pointer(base);
      }
    }
  }

  oop* p;
  p = cp->tags_addr();
  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
  p = cp->cache_addr();
  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
  p = cp->pool_holder_addr();
  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);

  return cp->object_size();
}
Example #3
// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->young_gen_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
}
Example #4
HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up, possibly into the next, taking the
      // end of the region and the minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}
Example #5
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

    assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
           "invalid space boundaries");

    if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
        // The space may move left and right or expand/shrink.
        // We'd like to enforce the desired page placement.
        MemRegion head, tail;
        if (last_setup_region().is_empty()) {
            // If it's the first initialization don't limit the amount of work.
            head = mr;
            tail = MemRegion(mr.end(), mr.end());
        } else {
            // Is there an intersection with the address space?
            MemRegion intersection = last_setup_region().intersection(mr);
            if (intersection.is_empty()) {
                intersection = MemRegion(mr.end(), mr.end());
            }
            // All the sizes below are in words.
            size_t head_size = 0, tail_size = 0;
            if (mr.start() <= intersection.start()) {
                head_size = pointer_delta(intersection.start(), mr.start());
            }
            if (intersection.end() <= mr.end()) {
                tail_size = pointer_delta(mr.end(), intersection.end());
            }
            // Limit the amount of page manipulation if necessary.
            if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
                const size_t change_size = head_size + tail_size;
                const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
                head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                                 head_size);
                tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                                 tail_size);
            }
            head = MemRegion(intersection.start() - head_size, intersection.start());
            tail = MemRegion(intersection.end(), intersection.end() + tail_size);
        }
        assert(mr.contains(head) && mr.contains(tail), "Sanity");

        if (UseNUMA) {
            numa_setup_pages(head, clear_space);
            numa_setup_pages(tail, clear_space);
        }

        if (AlwaysPreTouch) {
            pretouch_pages(head);
            pretouch_pages(tail);
        }

        // Remember where we stopped so that we can continue later.
        set_last_setup_region(MemRegion(head.start(), tail.end()));
    }
    // (the remainder of MutableSpace::initialize is not shown in this snippet)
}
Example #6
void Forte::register_stub(const char* name, address start, address end) {
#if !defined(_WINDOWS) && !defined(IA64)
  assert(pointer_delta(end, start, sizeof(jbyte)) < INT_MAX,
    "Code size exceeds maximum range");

  collector_func_load((char*)name, NULL, NULL, start,
    pointer_delta(end, start, sizeof(jbyte)), 0, NULL);
#endif // !_WINDOWS && !IA64
}
Example #7
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}
Example #8
// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t ASParNewGeneration::available_to_live() const {
#undef SHRINKS_AT_END_OF_EDEN
#ifdef SHRINKS_AT_END_OF_EDEN
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->object_heap_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
#else
  // The only space available for shrinking is in to-space if it
  // is above from-space.
  if (to()->bottom() > from()->bottom()) {
    const size_t alignment = os::vm_page_size();
    if (to()->capacity() < alignment) {
      return 0;
    } else {
      return to()->capacity() - alignment;
    }
  } else {
    return 0;
  }
#endif
}
Example #9
HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() should be read before end(), because
    // top() can't be greater than end(): if an update of _soft_end
    // occurs between 'end_val = end();' and 'top_val = top();', top()
    // can also grow up to the new end() and the condition
    // 'top_val > end_val' becomes true. To enforce this load ordering,
    // OrderAccess::loadload() is required after the top() read.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
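
A hedged sketch of how a caller might drive this lock-free fast path, falling back when par_allocate returns NULL; allocate_slow_locked is a hypothetical fallback used only for illustration, not a HotSpot function:

// Hypothetical slow path (e.g. take a lock, expand, or trigger a GC);
// declared here only so the sketch is self-contained.
HeapWord* allocate_slow_locked(ConcEdenSpace* eden, size_t word_size);

HeapWord* allocate_in_eden(ConcEdenSpace* eden, size_t word_size) {
  // Fast path: CAS-based bump-pointer allocation. Returns NULL once
  // pointer_delta(soft_end, top) can no longer cover word_size.
  HeapWord* obj = eden->par_allocate(word_size);
  if (obj == NULL) {
    obj = allocate_slow_locked(eden, word_size);
  }
  return obj;
}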
Example #10
 // Force future allocations to fail and queries for contains()
 // to return false
 void invalidate() {
   assert(!_retained, "Shouldn't retain an invalidated buffer.");
   _end    = _hard_end;
   _wasted += pointer_delta(_end, _top);  // unused  space
   _top    = _end;      // force future allocations to fail
   _bottom = _end;      // force future contains() queries to return false
 }
Example #11
void Flag::print_on(outputStream* st, bool withComments) {
  st->print("%9s %-40s %c= ", type, name, (origin != DEFAULT ? ':' : ' '));
  if (is_bool())     st->print("%-16s", get_bool() ? "true" : "false");
  if (is_intx())     st->print("%-16ld", get_intx());
  if (is_uintx())    st->print("%-16lu", get_uintx());
  if (is_uint64_t()) st->print("%-16lu", get_uint64_t());
  if (is_double())   st->print("%-16f", get_double());

  if (is_ccstr()) {
     const char* cp = get_ccstr();
     if (cp != NULL) {
       const char* eol;
       while ((eol = strchr(cp, '\n')) != NULL) {
         char format_buffer[FORMAT_BUFFER_LEN];
         size_t llen = pointer_delta(eol, cp, sizeof(char));
         jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
                     "%%." SIZE_FORMAT "s", llen);
         st->print(format_buffer, cp);
         st->cr();
         cp = eol+1;
         st->print("%5s %-35s += ", "", name);
       }
       st->print("%-16s", cp);
     }
     else st->print("%-16s", "");
  }
  st->print("%-20s", kind);
  if (withComments) {
#ifndef PRODUCT
    st->print("%s", doc );
#endif
  }
  st->cr();
}
Example #12
void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
  check_index(index, "index out of range");
  assert(high >= low, "addresses out of order");
  size_t offset = pointer_delta(high, low);
  check_offset(offset, "offset too large");
  set_offset_array(index, (u_char)offset);
}
Example #13
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    SharedHeap::fill_region_with_object(_retained_filler);
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}
Example #14
void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
  guarantee(start_page < end_page,
            "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);

  char* start_addr = page_start(start_page);
  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
}
Example #15
// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");
  
  // If we were initialized to a zero sized lab, there is
  // nothing to flush
  if (_state == zero_size)
    return;

  // PLAB's never allocate the last aligned_header_size 
  // so they can always fill with an array.
  HeapWord* tlab_end = end() + filler_header_size;
  typeArrayOop filler_oop = (typeArrayOop) top();
  filler_oop->set_mark();
  filler_oop->set_klass(Universe::intArrayKlassObj());
  const size_t array_length =
    pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
  assert( (array_length * (HeapWordSize/sizeof(jint))) < max_jint, "array too big in PSPromotionLAB");
  filler_oop->set_length((int)(array_length * (HeapWordSize/sizeof(jint))));

#ifdef ASSERT
  // Note that we actually DO NOT want to use the aligned header size!
  HeapWord* elt_words = ((HeapWord*)filler_oop) + typeArrayOopDesc::header_size(T_INT);
  Memory::set_words(elt_words, array_length, 0xDEAABABE);
#endif
  
  set_bottom(NULL);
  set_end(NULL);
  set_top(NULL);

  _state = flushed;
}
Example #16
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
 * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
 * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
 * implementation to be used instead.
 */
inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
                             jbyte compare_value, cmpxchg_memory_order order) {
    STATIC_ASSERT(sizeof(jbyte) == 1);
    volatile jint* dest_int =
        static_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint)));
    size_t offset = pointer_delta(dest, dest_int, 1);
    jint cur = *dest_int;
    jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);

    // current value may not be what we are looking for, so force it
    // to that value so the initial cmpxchg will fail if it is different
    cur_as_bytes[offset] = compare_value;

    // always execute a real cmpxchg so that we get the required memory
    // barriers even on initial failure
    do {
        // value to swap in matches current value ...
        jint new_value = cur;
        // ... except for the one jbyte we want to update
        reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;

        jint res = cmpxchg(new_value, dest_int, cur, order);
        if (res == cur) break; // success

        // at least one jbyte in the jint changed value, so update
        // our view of the current jint
        cur = res;
        // if our jbyte is still as cur we loop and try again
    } while (cur_as_bytes[offset] == compare_value);

    return cur_as_bytes[offset];
}
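
As a usage sketch, here is one way a caller might claim a one-byte flag with the emulated byte-sized cmpxchg. The memory_order_conservative argument is assumed to be the default cmpxchg_memory_order value from the same header; treat that, and the helper itself, as illustrative rather than code from this file:

// Returns true if this thread transitioned the byte-sized flag from 0 to 1.
// Neighbouring bytes in the same jint-sized word are preserved by the
// emulation above, at the cost of possibly retrying when they change.
bool try_claim(volatile jbyte* flag) {
  jbyte witnessed = Atomic::cmpxchg((jbyte)1, flag, (jbyte)0,
                                    memory_order_conservative);
  return witnessed == 0;  // cmpxchg returns the value it actually observed
}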
Example #17
void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate temporary type array decreasing free size with factor 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
Example #18
void SharedHeap::fill_region_with_object(MemRegion mr) {
  // Disable allocation events, since this isn't a "real" allocation.
  JVMPIAllocEventDisabler dis;  

  size_t word_size = mr.word_size();
  size_t aligned_array_header_size =
    align_object_size(typeArrayOopDesc::header_size(T_INT));

  if (word_size >= aligned_array_header_size) {
    const size_t array_length =
      pointer_delta(mr.end(), mr.start()) -
      typeArrayOopDesc::header_size(T_INT);
    const size_t array_length_words =
      array_length * (HeapWordSize/sizeof(jint));
    post_allocation_setup_array(Universe::intArrayKlassObj(),
                                mr.start(),
                                mr.word_size(),
                                (int)array_length_words);
#ifdef ASSERT
    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
    Memory::set_words(elt_words, array_length, 0xDEAFBABE);
#endif
  } else {
    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
    post_allocation_setup_obj(SystemDictionary::object_klass(),
                              mr.start(),
                              mr.word_size());
  }
}
Example #19
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
  // now until we investigate a more optimal setting.
  _revisit_mdo_stack   = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}
Example #20
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If object header crossed a small page boundary we mark the area
              // as invalid rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
          return;
      }
    }
  }
}
Example #21
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}
Example #22
 // If an allocation of the given "word_sz" can be satisfied within the
 // buffer, do the allocation, returning a pointer to the start of the
 // allocated block.  If the allocation request cannot be satisfied,
 // return NULL.
 HeapWord* allocate(size_t word_sz) {
   HeapWord* res = _top;
   if (pointer_delta(_end, _top) >= word_sz) {
     _top = _top + word_sz;
     return res;
   } else {
     return NULL;
   }
 }
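
A sketch of the usual caller pattern around this buffer-local fast path, assuming the enclosing class is the ParGCAllocBuffer from Example #13; allocate_direct_in_old_gen is a hypothetical slow path named only for illustration:

// Hypothetical fallback when the thread-local buffer cannot satisfy the
// request (e.g. allocate straight from the shared space); illustration only.
HeapWord* allocate_direct_in_old_gen(size_t word_sz);

HeapWord* copy_destination(ParGCAllocBuffer* plab, size_t word_sz) {
  // Fast path: returns NULL when pointer_delta(_end, _top) < word_sz.
  HeapWord* dest = plab->allocate(word_sz);
  if (dest == NULL) {
    dest = allocate_direct_in_old_gen(word_sz);
  }
  return dest;
}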
Example #23
HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
Example #24
PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
void Flag::print_on(outputStream* st, bool withComments) {
  // Don't print notproduct and develop flags in a product build.
  if (is_constant_in_binary()) {
    return;
  }

  st->print("%9s %-40s %c= ", _type, _name, (!is_default() ? ':' : ' '));

  if (is_bool()) {
    st->print("%-16s", get_bool() ? "true" : "false");
  }
  if (is_intx()) {
    st->print("%-16ld", get_intx());
  }
  if (is_uintx()) {
    st->print("%-16lu", get_uintx());
  }
  if (is_uint64_t()) {
    st->print("%-16lu", get_uint64_t());
  }
  if (is_double()) {
    st->print("%-16f", get_double());
  }
  if (is_ccstr()) {
    const char* cp = get_ccstr();
    if (cp != NULL) {
      const char* eol;
      while ((eol = strchr(cp, '\n')) != NULL) {
        char format_buffer[FORMAT_BUFFER_LEN];
        size_t llen = pointer_delta(eol, cp, sizeof(char));
        jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
            "%%." SIZE_FORMAT "s", llen);
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
        st->print(format_buffer, cp);
PRAGMA_DIAG_POP
        st->cr();
        cp = eol+1;
        st->print("%5s %-35s += ", "", _name);
      }
      st->print("%-16s", cp);
    }
    else st->print("%-16s", "");
  }

  st->print("%-20s", " ");
  print_kind(st);

  if (withComments) {
#ifndef PRODUCT
    st->print("%s", _doc);
#endif
  }
  st->cr();
}
Example #25
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  return (narrowOop)result;
}
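
The encoding above is just (address - base) >> shift, and the inverse is base + (narrow << shift). Here is a small self-contained round-trip sketch using plain integer arithmetic; the base, shift and address values are illustrative assumptions, not taken from any VM:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t base  = UINT64_C(0x700000000);        // illustrative heap base
  const int      shift = 3;                            // log2 of object alignment
  const uint64_t addr  = base + UINT64_C(0x12345678);  // an 8-byte-aligned address

  // Encode: scaled offset from the heap base, narrowed to 32 bits.
  const uint32_t narrow = (uint32_t)((addr - base) >> shift);

  // Decode: rebuild the full address from base, shift and the narrow value.
  const uint64_t decoded = base + ((uint64_t)narrow << shift);

  assert(decoded == addr);
  return 0;
}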
Example #26
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc <  (char*)_reserved.end(),
         "p not in range.");
  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
  size_t result = delta >> LogN;
  assert(result < _vs.committed_size(), "bad index from address");
  return result;
}
Example #27
 size_t ptr_2_card_num(const jbyte* card_ptr) {
   assert(card_ptr >= _ct_bot,
          "Invalid card pointer: "
          "card_ptr: " PTR_FORMAT ", "
          "_ct_bot: " PTR_FORMAT,
          p2i(card_ptr), p2i(_ct_bot));
   size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
   assert(card_num < _reserved_max_card_num,
          "card pointer out of range: " PTR_FORMAT, p2i(card_ptr));
   return card_num;
 }
Example #28
 size_t ptr_2_card_num(const jbyte* card_ptr) {
   assert(card_ptr >= _ct_bot,
          err_msg("Inavalied card pointer: "
                  "card_ptr: " PTR_FORMAT ", "
                  "_ct_bot: " PTR_FORMAT,
                  card_ptr, _ct_bot));
   size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
   check_card_num(card_num,
                  err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
   return card_num;
 }
Example #29
// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}
Example #30
size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  HeapWord* current_top = top();
  assert(p <= current_top, "p is not a block start");
  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
  if (p < current_top)
    return oop(p)->size();
  else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}
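
Finally, note how the examples alternate between element sizes: the two-argument form and HeapWordSize measure deltas in words, while sizeof(char) and sizeof(jbyte) measure them in bytes. A standalone illustration of that distinction, independent of the HotSpot types:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Same arithmetic as pointer_delta: distance in units of element_size.
static size_t delta(const void* left, const void* right, size_t element_size) {
  return ((uintptr_t)left - (uintptr_t)right) / element_size;
}

int main() {
  uint64_t heap[16];              // stand-in for 8-byte HeapWords
  void* bottom = &heap[0];
  void* top    = &heap[10];

  printf("delta in bytes: %zu\n", delta(top, bottom, sizeof(char)));      // 80
  printf("delta in words: %zu\n", delta(top, bottom, sizeof(uint64_t)));  // 10
  return 0;
}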