Example 1
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  const MemRegion urasm = sp->used_region_at_save_marks();
#ifdef ASSERT
  // Convert the assertion check to a warning if we are running
  // CMS+ParNew until related bug is fixed.
  MemRegion ur    = sp->used_region();
  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
         err_msg("Did you forget to call save_marks()? "
                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                 urasm.start(), urasm.end(), ur.start(), ur.end()));
  // In the case of CMS+ParNew, issue a warning
  if (!ur.contains(urasm)) {
    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
    warning("CMS+ParNew: Did you forget to call save_marks()? "
            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
            "[" PTR_FORMAT ", " PTR_FORMAT ")",
             urasm.start(), urasm.end(), ur.start(), ur.end());
    MemRegion ur2 = sp->used_region();
    MemRegion urasm2 = sp->used_region_at_save_marks();
    if (!ur.equals(ur2)) {
      warning("CMS+ParNew: Flickering used_region()!!");
    }
    if (!urasm.equals(urasm2)) {
      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
    }
    ShouldNotReachHere();
  }
#endif
  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
}
template <bool nv, typename OopClosureType>
void ObjArrayKlass::oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, MemRegion mr) {
  if (UseCompressedOops) {
    oop_oop_iterate_elements_specialized_bounded<nv, narrowOop>(a, closure, mr.start(), mr.end());
  } else {
    oop_oop_iterate_elements_specialized_bounded<nv, oop>(a, closure, mr.start(), mr.end());
  }
}
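The dispatch above follows a common HotSpot pattern: a single runtime branch on UseCompressedOops selects a template instantiation specialized for either narrow (32-bit) or full-width oops. A minimal standalone sketch of the pattern, using hypothetical integer types in place of the VM's oop and narrowOop:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for narrowOop (32-bit) and oop (pointer-width).
template <typename T>
void iterate_elements() {
  printf("iterating with %zu-byte element references\n", sizeof(T));
}

int main() {
  bool use_compressed_oops = true;  // stands in for the UseCompressedOops flag
  if (use_compressed_oops) {
    iterate_elements<uint32_t>();   // narrowOop case
  } else {
    iterate_elements<uintptr_t>();  // full-width oop case
  }
  return 0;
}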
Example 3
void SharedHeap::fill_region_with_object(MemRegion mr) {
  // Disable allocation events, since this isn't a "real" allocation.
  JVMPIAllocEventDisabler dis;  

  size_t word_size = mr.word_size();
  size_t aligned_array_header_size =
    align_object_size(typeArrayOopDesc::header_size(T_INT));

  if (word_size >= aligned_array_header_size) {
    // Size of the array payload in words, and the corresponding
    // element count in jints.
    const size_t array_payload_words =
      pointer_delta(mr.end(), mr.start()) -
      typeArrayOopDesc::header_size(T_INT);
    const size_t array_length =
      array_payload_words * (HeapWordSize/sizeof(jint));
    post_allocation_setup_array(Universe::intArrayKlassObj(),
                                mr.start(),
                                mr.word_size(),
                                (int)array_length);
#ifdef ASSERT
    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
    Memory::set_words(elt_words, array_payload_words, 0xDEAFBABE);
#endif
  } else {
    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
    post_allocation_setup_obj(SystemDictionary::object_klass(),
                              mr.start(),
                              mr.word_size());
  }
}
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               bool clear,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism.  (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte*    chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    non_clean_card_iterate_work(chunk_mr, cl, clear);

    // Find the next chunk of the stride.
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}
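The chunk-selection arithmetic in process_stride() is easier to see with concrete numbers. The standalone sketch below mirrors the two branches, with hypothetical constants (4 strides, 256 cards per chunk, first chunk index 10) in place of the real card-table state:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t cards_per_chunk = 256;  // stands in for CardsPerStrideChunk
  const uintptr_t n_strides       = 4;    // hypothetical stride count
  const uintptr_t start_chunk     = 10;   // chunk index of used.start()
  const uintptr_t start_stride    = start_chunk % n_strides;  // == 2

  for (uintptr_t stride = 0; stride < n_strides; stride++) {
    // Mirrors the two branches above: either step forward within the
    // current chunk group, or wrap to the next group boundary first.
    uintptr_t first_chunk_offset = (stride >= start_stride)
        ? (stride - start_stride) * cards_per_chunk
        : (n_strides - start_stride + stride) * cards_per_chunk;
    printf("stride %zu starts %zu cards past start_card\n",
           (size_t)stride, (size_t)first_chunk_offset);
  }
  return 0;
}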
void CardTableRS::clear_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  assert(addr_for(cur) == mr.start(), "region must be card aligned");
  while (cur < last) {
    *cur = CardTableModRefBS::clean_card;
    cur++;
  }
}
Example 6
// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}
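The non-contiguous branch above relies on a plain C++ mechanism: a qualified call such as _bt.BlockOffsetArray::alloc_block(...) bypasses virtual dispatch and runs the named base-class version, which is how the "surgery" case updates the shared array without the subclass's _next_threshold bookkeeping. A minimal sketch with hypothetical classes:

#include <cstdio>

struct BaseBOT {
  virtual ~BaseBOT() {}
  virtual void alloc_block() { printf("base: update shared array only\n"); }
};

struct ThresholdBOT : BaseBOT {
  void alloc_block() override {
    printf("derived: update shared array and _next_threshold\n");
  }
};

int main() {
  ThresholdBOT bt;
  bt.alloc_block();            // virtual dispatch: runs the derived version
  bt.BaseBOT::alloc_block();   // qualified call: runs the base version only
  return 0;
}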
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card)
        first_dirty++;
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      HeapWord* cur = start_block;
      VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
      while (cur < end) {
        if (s->block_is_obj(cur)) {
          oop(cur)->oop_iterate(&verify_blk);
        }
        cur += s->block_size(cur);
      }
      cur_entry = first_dirty;
    } else {
      guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
                "Illegal CT value");
      // If we're in the parallel case, the cur and prev values are
      // different, and we can't have left a prev in the table.
      guarantee(cur_youngergen_card_val() == youngergen_card
                || !is_prev_youngergen_card_val(*cur_entry),
                "Illegal CT value");
      cur_entry++;
    }
  }
}
Example 8
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}
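The cl->previous() bookkeeping above is a high-water-mark protocol: the closure remembers the maximum address already visited so a later invocation over an overlapping MemRegion skips the blocks it has already seen. A standalone sketch with integer indices standing in for heap addresses (names hypothetical; the real code also skips the update after object arrays):

#include <cstdio>

struct UpwardsClosure {
  int previous = 0;                    // max index visited so far
  void visit(int i) { printf("visit block %d\n", i); }
};

void iterate_mem(UpwardsClosure* cl, int mr_start, int mr_end) {
  if (cl->previous >= mr_end) return;  // nothing new to do
  int start = (cl->previous > mr_start) ? cl->previous : mr_start;
  for (int i = start; i < mr_end; i++) {
    cl->visit(i);
  }
  cl->previous = mr_end;               // min address for next time
}

int main() {
  UpwardsClosure cl;
  iterate_mem(&cl, 0, 4);  // visits blocks 0..3
  iterate_mem(&cl, 2, 6);  // overlapping region: visits only 4..5
  return 0;
}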
Example 9
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch || UseColoredSpaces)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization, don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (UseColoredSpaces) {
        if (ColoredSpaceResizeRate > 0 && !AlwaysPreTouch) {
          const size_t change_size = head_size + tail_size;
          const float setup_rate_words = ColoredSpaceResizeRate >> LogBytesPerWord;
          head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                           head_size);
          tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                           tail_size);
        }
      } else {
        if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
          const size_t change_size = head_size + tail_size;
          const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
          head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                           head_size);
          tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                           tail_size);
        }
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
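The MIN2 clamping above throttles how many words of head and tail get their pages set up per resize, splitting a fixed budget between the two sides in proportion to their sizes. A numeric sketch with hypothetical values (a 1024-word budget standing in for NUMASpaceResizeRate / ColoredSpaceResizeRate converted to words):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t setup_rate_words = 1024;        // hypothetical per-resize budget
  size_t head_size = 3000, tail_size = 1000;   // words needing page setup
  const size_t change_size = head_size + tail_size;

  // Each side gets at most its proportional share of the budget.
  size_t head_limited = setup_rate_words * head_size / change_size;  // 768
  size_t tail_limited = setup_rate_words * tail_size / change_size;  // 256
  if (head_limited > head_size) head_limited = head_size;
  if (tail_limited > tail_size) tail_limited = tail_size;

  printf("head: %zu -> %zu words, tail: %zu -> %zu words\n",
         head_size, head_limited, tail_size, tail_limited);
  return 0;
}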
Example 10
// Simply mangle the MemRegion mr.
void SpaceMangler::mangle_region(MemRegion mr) {
  assert(ZapUnusedHeapArea, "Mangling should not be in use");
#ifdef ASSERT
  if (TraceZapUnusedHeapArea) {
    gclog_or_tty->print("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")", p2i(mr.start()), p2i(mr.end()));
  }
  Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
  if (TraceZapUnusedHeapArea) {
    gclog_or_tty->print_cr(" done");
  }
#endif
}
Example 11
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
  jbyte* cur_entry = _ct->byte_for(mr.last());
  const jbyte* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }

      // Fast forward through a potential run of contiguous clean cards,
      // one whole card-table word at a time, starting at a word boundary.
      if (is_word_aligned(cur_entry)) {
        jbyte* cur_row = cur_entry - BytesPerWord;
        while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
          cur_row -= BytesPerWord;
        }
          cur_row -= BytesPerWord;
        }
        cur_entry = cur_row + BytesPerWord;
        cur_hw = _ct->addr_for(cur_entry);
      }

      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}
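The right-to-left "dirty window" accumulation above can be demonstrated over a plain byte array. This sketch keeps only the window bookkeeping; the clean/dirty encoding and the card values are hypothetical:

#include <cstdio>

int main() {
  const char CLEAN = 0, DIRTY = 1;
  char cards[8] = {DIRTY, CLEAN, DIRTY, DIRTY, CLEAN, DIRTY, CLEAN, DIRTY};

  int end_of_non_clean   = 8;  // exclusive right end of the current window
  int start_of_non_clean = 8;
  for (int cur = 7; cur >= 0; cur--) {
    if (cards[cur] == DIRTY) {
      start_of_non_clean = cur;                     // grow the window leftward
    } else {
      if (start_of_non_clean < end_of_non_clean) {  // flush a non-empty window
        printf("process dirty range [%d, %d)\n",
               start_of_non_clean, end_of_non_clean);
      }
      end_of_non_clean = start_of_non_clean = cur;  // reset at the clean card
    }
  }
  if (start_of_non_clean < end_of_non_clean) {      // window co-initial with mr
    printf("process dirty range [%d, %d)\n",
           start_of_non_clean, end_of_non_clean);
  }
  return 0;
}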
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _csp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
Example 13
BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero_) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array)
{
  assert(_bottom <= _end, "arguments out of order");
  set_init_to_zero(init_to_zero_);
  if (!init_to_zero_) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
Example 14
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _gsp(NULL) {
  assert(_bottom <= _end, "arguments out of order");
}
Example 15
MemRegion MemRegion::_union(const MemRegion mr2) const {
  // If one region is empty, return the other
  if (is_empty()) return mr2;
  if (mr2.is_empty()) return MemRegion(start(), end());

  // Otherwise, regions must overlap or be adjacent
  assert(((start() <= mr2.start()) && (end() >= mr2.start())) ||
         ((mr2.start() <= start()) && (mr2.end() >= start())),
         "regions must overlap or be adjacent");
  MemRegion res;
  HeapWord* res_start = MIN2(start(), mr2.start());
  HeapWord* res_end   = MAX2(end(),   mr2.end());
  res.set_start(res_start);
  res.set_end(res_end);
  return res;
}
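The union rule above only makes sense when the inputs overlap or touch; otherwise the result would cover addresses belonging to neither region, which is what the assert guards against. A standalone sketch over integer intervals [start, end), with hypothetical values:

#include <algorithm>
#include <cassert>
#include <cstdio>

struct Interval { int start, end; };

Interval interval_union(Interval a, Interval b) {
  if (a.start == a.end) return b;  // empty a: return the other region
  if (b.start == b.end) return a;  // empty b
  // Must overlap or be adjacent, as in the MemRegion assert above.
  assert((a.start <= b.start && a.end >= b.start) ||
         (b.start <= a.start && b.end >= a.start));
  return Interval{std::min(a.start, b.start), std::max(a.end, b.end)};
}

int main() {
  Interval u = interval_union(Interval{0, 4}, Interval{4, 8});  // adjacent
  printf("union: [%d, %d)\n", u.start, u.end);                  // [0, 8)
  return 0;
}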
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require. 
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Memory::set_words(top(), free()/HeapWordSize, badHeapWord));
    }
    
    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}
Example 17
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  if (whole_heap) {
    while (byte <= last_byte) {
      *byte = dirty_card;
      byte++;
    }
  } else {
    // Enqueue if necessary.
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      while (byte <= last_byte) {
        if (*byte != dirty_card) {
          *byte = dirty_card;
          jt->dirty_card_queue().enqueue(byte);
        }
        byte++;
      }
    } else {
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      while (byte <= last_byte) {
        if (*byte != dirty_card) {
          *byte = dirty_card;
          _dcqs.shared_dirty_card_queue()->enqueue(byte);
        }
        byte++;
      }
    }
  }
}
void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t    lowest_non_clean_chunk_size)
{
  // We must worry about non-array objects that cross chunk boundaries,
  // because such objects are both precisely and imprecisely marked:
  // .. if the head of such an object is dirty, the entire object
  //    needs to be scanned, under the interpretation that this
  //    was an imprecise mark
  // .. if the head of such an object is not dirty, we can assume
  //    precise marking and it's efficient to scan just the dirty
  //    cards.
  // In either case, each scanned reference must be scanned precisely
  // once so as to avoid cloning of a young referent. For efficiency,
  // our closures depend on this property and do not protect against
  // double scans.

  uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
  cur_chunk_index           = cur_chunk_index - lowest_non_clean_base_chunk_index;

  NOISY(tty->print_cr("===========================================================================");)
Example 19
void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (;cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
    }
  }
}
Example 20
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.
  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}
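The end adjustment above shrinks the usable part of the lab so a flush can always place a filler object in the reserved tail. A numeric sketch, assuming a hypothetical 4-word filler header in a 1024-word lab:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t lab_words = 1024;        // hypothetical lab size in words
  const size_t filler_header_size = 4;  // hypothetical aligned header size
  // Mirrors "end = end - filler_header_size": allocations may use only the
  // first 1020 words; the last 4 are reserved for the filler's header.
  size_t usable_words = lab_words - filler_header_size;
  printf("usable: %zu of %zu words\n", usable_words, lab_words);
  return 0;
}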
inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}
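The round_to() call above rounds mr.end() up to the next card boundary. A standalone sketch of the rounding arithmetic, assuming the typical 512-byte card size (treat the constant as an assumption here):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_size = 512;  // assumed CardTableModRefBS::card_size
  uintptr_t end = 0x10234;          // hypothetical, unaligned mr.end()
  // Round up to the next multiple of card_size (card_size is a power of 2).
  uintptr_t rounded = (end + card_size - 1) & ~(card_size - 1);
  printf("%#lx rounds up to %#lx\n",
         (unsigned long)end, (unsigned long)rounded);  // 0x10234 -> 0x10400
  return 0;
}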
void Space::initialize(MemRegion mr, bool clear_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear();
}
Example 23
void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}
inline void CMSBitMap::par_markRange(MemRegion mr) {
  assert_locked();
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  size_t end_ofs = heapWordToOffset(mr.end());
  // Range size is usually just 1 bit.
  _bm.par_set_range(start_ofs, end_ofs, BitMap::small_range);
}
void ImmutableSpace::initialize(MemRegion mr) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();

  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");

  _bottom = bottom;
  _end = end;
}
Example 26
MemRegion MemRegion::intersection(const MemRegion mr2) const {
  MemRegion res;
  HeapWord* res_start = MAX2(start(), mr2.start());
  HeapWord* res_end   = MIN2(end(),   mr2.end());
  if (res_start < res_end) {
    res.set_start(res_start);
    res.set_end(res_end);
  }
  return res;
}
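Unlike _union(), intersection needs no precondition: clamping inward with max/min and treating a non-positive width as empty covers the disjoint case, since res keeps its default empty state. A standalone sketch over integer intervals [start, end), with hypothetical values:

#include <algorithm>
#include <cstdio>

struct Interval { int start, end; };

Interval intersection(Interval a, Interval b) {
  int s = std::max(a.start, b.start);
  int e = std::min(a.end, b.end);
  return (s < e) ? Interval{s, e} : Interval{0, 0};  // empty if disjoint
}

int main() {
  Interval c = intersection(Interval{0, 6}, Interval{4, 10});
  printf("intersection: [%d, %d)\n", c.start, c.end);  // [4, 6)
  return 0;
}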
Example 27
void
CardTableModRefBS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int       i        = find_covering_region_containing(sp->bottom());
  MemRegion covered  = _covered[i];
  size_t    n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region.  Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then it would cause "expand_and_allocate" calls that would
  // increase the number of chunks in the covered region.  Then a second
  // thread would come and execute this, see that the size didn't match,
  // and free and allocate again.  So the first thread would be using a
  // freed "_lowest_non_clean" array.)

  // Do a dirty read here. If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
  int cur_collection = Universe::heap()->total_collections();
  if (_last_LNC_resizing_collection[i] != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    if (_last_LNC_resizing_collection[i] != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
          _lowest_non_clean_chunk_size[i]       = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++)
            _lowest_non_clean[i][j] = NULL;
        }
      }
      _last_LNC_resizing_collection[i] = cur_collection;
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean                  = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
}
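The "dirty read, then re-check under the lock" sequence above is classic double-checked locking: the unsynchronized read keeps the common path cheap, and the second read under ParGCRareEvent_lock ensures the rare resize happens exactly once. A minimal sketch with std::mutex; the names are hypothetical, and the version number stands in for the collection count:

#include <mutex>

static std::mutex rare_event_lock;       // stands in for ParGCRareEvent_lock
static int last_resizing_version = -1;   // stands in for the collection count

void maybe_resize(int cur_version) {
  if (last_resizing_version != cur_version) {    // dirty read, no lock held
    std::lock_guard<std::mutex> x(rare_event_lock);
    if (last_resizing_version != cur_version) {  // re-check under the lock
      // ... free and reallocate the array exactly once per version ...
      last_resizing_version = cur_version;
    }
  }
}

int main() {
  maybe_resize(1);
  maybe_resize(1);  // second call takes the cheap, lock-free path
  return 0;
}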
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  // Below we may use an explicit loop instead of memset() because on
  // certain platforms memset() can give concurrent readers phantom zeros.
  if (UseMemSetInBOT) {
    memset(first, g1_young_gen, last - first);
  } else {
    for (jbyte* i = first; i < last; i++) {
      *i = g1_young_gen;
    }
  }
}
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                             OopsInGenClosure* cl,
                                                             CardTableRS* ct,
                                                             int n_threads) {
  assert(n_threads > 0, "Error: expected n_threads > 0");
  assert((n_threads == 1 && ParallelGCThreads == 0) ||
         n_threads <= (int)ParallelGCThreads,
         "# worker threads != # requested!");
  assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
  assert(UseDynamicNumberOfGCThreads ||
         !FLAG_IS_DEFAULT(ParallelGCThreads) ||
         n_threads == (int)ParallelGCThreads,
         "# worker threads != # requested!");
  // Make sure the LNC array is valid for the space.
  jbyte**   lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t    lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

  uint n_strides = n_threads * ParGCStridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

  uint stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides, cl, ct,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}
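The claiming loop above lets each worker pull stride numbers until the SequentialSubTasksDone has handed out all n_strides tasks. A minimal sketch of the same idea with std::atomic standing in for is_task_claimed(); the names are hypothetical:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const unsigned n_strides = 8;
  std::atomic<unsigned> next_stride{0};

  // Each worker claims the next unclaimed stride until none remain.
  auto worker = [&](int id) {
    unsigned stride;
    while ((stride = next_stride.fetch_add(1)) < n_strides) {
      printf("worker %d processes stride %u\n", id, stride);
    }
  };

  std::vector<std::thread> pool;
  for (int i = 0; i < 2; i++) pool.emplace_back(worker, i);
  for (auto& t : pool) t.join();
  return 0;
}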
Example 30
void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr)  && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}