Code Example #1
void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
  Space* sp,
  MemRegion mr,
  OopsInGenClosure* cl,
  CardTableRS* ct,
  uint n_threads)
{
  if (!mr.is_empty()) {
    if (n_threads > 0) {
#if INCLUDE_ALL_GCS
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // clear_cl finds contiguous dirty ranges of cards to process and clear.

      // This is the single-threaded version used by DefNew.
      const bool parallel = false;

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

      clear_cl.do_MemRegion(mr);
    }
  }
}
Code Example #2
  void scanCard(size_t index, HeapRegion *r) {
    _cards_done++;
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                         CardTableModRefBS::Precise,
                         HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space *sp = SharedHeap::heap()->space_containing(card_start);
    MemRegion sm_region;
    if (ParallelGCThreads > 0) {
      // first find the used area
      sm_region = sp->used_region_at_save_marks();
    } else {
      // The closure is not idempotent.  We shouldn't look at objects
      // allocated during the GC.
      sm_region = sp->used_region_at_save_marks();
    }
    MemRegion mr = sm_region.intersection(MemRegion(card_start, card_end));
    if (!mr.is_empty()) {
      cl->do_MemRegion(mr);
    }
  }
Code Example #3
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

    assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
           "invalid space boundaries");

    if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
        // The space may move left and right or expand/shrink.
        // We'd like to enforce the desired page placement.
        MemRegion head, tail;
        if (last_setup_region().is_empty()) {
            // If it's the first initialization don't limit the amount of work.
            head = mr;
            tail = MemRegion(mr.end(), mr.end());
        } else {
            // Is there an intersection with the address space?
            MemRegion intersection = last_setup_region().intersection(mr);
            if (intersection.is_empty()) {
                intersection = MemRegion(mr.end(), mr.end());
            }
            // All the sizes below are in words.
            size_t head_size = 0, tail_size = 0;
            if (mr.start() <= intersection.start()) {
                head_size = pointer_delta(intersection.start(), mr.start());
            }
            if (intersection.end() <= mr.end()) {
                tail_size = pointer_delta(mr.end(), intersection.end());
            }
            // Limit the amount of page manipulation if necessary.
            if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
                const size_t change_size = head_size + tail_size;
                const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
                head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                                 head_size);
                tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                                 tail_size);
            }
            head = MemRegion(intersection.start() - head_size, intersection.start());
            tail = MemRegion(intersection.end(), intersection.end() + tail_size);
        }
        assert(mr.contains(head) && mr.contains(tail), "Sanity");

        if (UseNUMA) {
            numa_setup_pages(head, clear_space);
            numa_setup_pages(tail, clear_space);
        }

        if (AlwaysPreTouch) {
            pretouch_pages(head);
            pretouch_pages(tail);
        }

        // Remember where we stopped so that we can continue later.
        set_last_setup_region(MemRegion(head.start(), tail.end()));
    }
}
Code Example #4
inline void CMSBitMap::par_markRange(MemRegion mr) {
  assert_locked();
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  size_t end_ofs = heapWordToOffset(mr.end());
  // Range size is usually just 1 bit.
  _bm.par_set_range(start_ofs, end_ofs, BitMap::small_range);
}
Code Example #5
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}
Code Example #6
void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
    if (!mr.is_empty()) {
        size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
        HeapWord *start = (HeapWord*)round_to((intptr_t) mr.start(), page_size);
        HeapWord *end =  (HeapWord*)round_down((intptr_t) mr.end(), page_size);
        if (end > start) {
            size_t size = pointer_delta(end, start, sizeof(char));
            if (clear_space) {
                // Prefer page reallocation to migration.
                os::free_memory((char*)start, size, page_size);
            }
            os::numa_make_global((char*)start, size);
        }
    }
}
Code Example #7
void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (;cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
    }
  }
}
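
The helper above skips the first card entry when mr.start() is not card-aligned, because that entry also covers memory just before mr and may legitimately be dirty. Below is a minimal standalone sketch of the card arithmetic this relies on, assuming the conventional 512-byte cards (card_shift = 9); the toy card table and the byte_for/byte_after/is_card_aligned helpers here are illustrative stand-ins, not the real CardTableRS/CardTableModRefBS members.

#include <cassert>
#include <cstdint>
#include <cstddef>

// Conventional HotSpot card geometry: one byte in the card table covers
// 512 (= 2^9) bytes of heap.  Everything below is a self-contained sketch,
// not the real card table interface.
static const int    card_shift = 9;
static const size_t card_size  = size_t(1) << card_shift;

static uint8_t        card_table[1024];   // toy card table
static const uintptr_t heap_base = 0;     // toy heap starts at address 0

// Card entry covering the given heap address (cf. byte_for()).
inline uint8_t* byte_for(uintptr_t addr) {
  return &card_table[(addr - heap_base) >> card_shift];
}

// First card entry past the given address (cf. byte_after()).
inline uint8_t* byte_after(uintptr_t addr) {
  return byte_for(addr) + 1;
}

inline bool is_card_aligned(uintptr_t addr) {
  return (addr & (card_size - 1)) == 0;
}

int main() {
  // A region that starts in the middle of a card: its first card entry also
  // covers bytes before the region, so a write just prior to the region can
  // dirty it.  That is why verify_aligned_region_empty() advances cur_entry
  // past it before checking that every remaining card is clean.
  uintptr_t start = 3 * card_size + 100;   // not card aligned
  uintptr_t last  = 5 * card_size - 1;     // last byte of the region

  uint8_t* cur   = byte_for(start);
  uint8_t* limit = byte_after(last);
  if (!is_card_aligned(start)) {
    cur++;                                 // skip the shared first card
  }
  assert(cur == &card_table[4] && limit == &card_table[5]);
  return 0;
}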
Code Example #8
MemRegion MemRegion::_union(const MemRegion mr2) const {
  // If one region is empty, return the other
  if (is_empty()) return mr2;
  if (mr2.is_empty()) return MemRegion(start(), end());

  // Otherwise, regions must overlap or be adjacent
  assert(((start() <= mr2.start()) && (end() >= mr2.start())) ||
         ((mr2.start() <= start()) && (mr2.end() >= start())),
             "non-adjacent or overlapping regions");
  MemRegion res;
  HeapWord* res_start = MIN2(start(), mr2.start());
  HeapWord* res_end   = MAX2(end(),   mr2.end());
  res.set_start(res_start);
  res.set_end(res_end);
  return res;
}
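
Many of the examples in this listing follow the same pattern: clip a candidate region against a space's used region with intersection() and skip the work when the result is_empty(), while _union() grows a region to cover both inputs. The standalone sketch below (a simplified, hypothetical MiniMemRegion over raw char* addresses, not the HotSpot MemRegion class) illustrates those semantics; only the operation names mirror the code above.

#include <algorithm>
#include <cassert>

// Hypothetical, simplified stand-in for HotSpot's MemRegion, using raw
// char* addresses instead of HeapWord*; for illustration only.
struct MiniMemRegion {
  const char* _start;
  const char* _end;   // half-open: [_start, _end)

  MiniMemRegion() : _start(nullptr), _end(nullptr) {}
  MiniMemRegion(const char* s, const char* e) : _start(s), _end(e) {}

  const char* start() const { return _start; }
  const char* end()   const { return _end; }
  bool is_empty()     const { return _start >= _end; }

  // Overlap of the two regions; an empty region if they are disjoint.
  MiniMemRegion intersection(MiniMemRegion other) const {
    const char* s = std::max(_start, other._start);
    const char* e = std::min(_end, other._end);
    return (s < e) ? MiniMemRegion(s, e) : MiniMemRegion();
  }

  // Union of two regions that overlap or touch (cf. MemRegion::_union above).
  MiniMemRegion union_with(MiniMemRegion other) const {
    if (is_empty()) return other;
    if (other.is_empty()) return *this;
    assert(_start <= other._end && other._start <= _end,
           "regions must overlap or be adjacent");
    return MiniMemRegion(std::min(_start, other._start),
                         std::max(_end, other._end));
  }
};

int main() {
  char heap[64];
  MiniMemRegion used(heap, heap + 48);        // "used" part of a space
  MiniMemRegion card(heap + 40, heap + 56);   // a card straddling the top

  // The pattern from the scanCard/oop_iterate examples: clip first, then
  // only do work if something is left.
  MiniMemRegion mr = used.intersection(card);
  if (!mr.is_empty()) {
    // Process [mr.start(), mr.end()); the examples above iterate objects
    // or cards inside this clipped range.
    assert(mr.start() == heap + 40 && mr.end() == heap + 48);
  }

  // Union grows the covered range, as in MemRegion::_union above.
  MiniMemRegion both = used.union_with(card);
  assert(both.start() == heap && both.end() == heap + 56);
  return 0;
}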
Code Example #9
  void scanCard(size_t index, HeapRegion *r) {
    // Stack allocate the DirtyCardToOopClosure instance
    HeapRegionDCTOC cl(_g1h, r, _oc,
                       CardTableModRefBS::Precise);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
    MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
    MemRegion mr = pre_gc_allocated.intersection(card_region);
    if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
      // We make the card as "claimed" lazily (so races are possible
      // but they're benign), which reduces the number of duplicate
      // scans (the rsets of the regions in the cset can intersect).
      _ct_bs->set_card_claimed(index);
      _cards_done++;
      cl.do_MemRegion(mr);
    }
  }
Code Example #10
void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}
Code Example #11
void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr)  && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}
Code Example #12
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  mr     = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!mr.is_empty()) {
    walk_mem_region(mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}